[clang] 1972fb2 - [Clang][RISCV] Use poison instead of undef

via cfe-commits <cfe-commits at lists.llvm.org>
Wed Dec 28 23:15:50 PST 2022


Author: eopXD
Date: 2022-12-28T23:15:31-08:00
New Revision: 1972fb23cb36cee787e585c5df1c68812cceb324

URL: https://github.com/llvm/llvm-project/commit/1972fb23cb36cee787e585c5df1c68812cceb324
DIFF: https://github.com/llvm/llvm-project/commit/1972fb23cb36cee787e585c5df1c68812cceb324.diff

LOG: [Clang][RISCV] Use poison instead of undef

Reviewed By: khchen

Differential Revision: https://reviews.llvm.org/D140687
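
For the unmasked, non-policy RVV builtins there is no destination value to merge into, so the passthru (merge) operand of the emitted llvm.riscv.* intrinsic call is a pure placeholder; emitting poison there instead of undef constrains later optimization less. A minimal C sketch of the effect, assuming the pre-__riscv_-prefix intrinsic naming these tests use (function and operand names are illustrative; the IR comments mirror the updated FileCheck lines below):

    #include <riscv_vector.h>

    vint8m1_t add(vint8m1_t op1, vint8m1_t op2, size_t vl) {
      // before: call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef,  ...)
      // after:  call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, ...)
      return vadd_vv_i8m1(op1, op2, vl);
    }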

Added: 
    

Modified: 
    clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
    clang/utils/TableGen/RISCVVEmitter.cpp

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c b/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
index 73e2ce770d213..5719f77a376d8 100644
--- a/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
+++ b/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
@@ -25,7 +25,7 @@ vint8m1_t vadd_generic (vint8m1_t op0, vint8m1_t op1, size_t op2);
 // CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i8>, ptr [[OP0_ADDR]], align 1
 // CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i8>, ptr [[OP1_ADDR]], align 1
 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[VL_ADDR]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[TMP0]], <vscale x 8 x i8> [[TMP1]], i64 [[TMP2]])
+// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[TMP0]], <vscale x 8 x i8> [[TMP1]], i64 [[TMP2]])
 // CHECK-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[RET]], align 1
 // CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 8 x i8>, ptr [[RET]], align 1
 // CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP4]]

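The overloaded tests below exercise the same lowering through the generic spellings; a minimal sketch, assuming the overloaded vaadd form these tests call (the name avg_add is illustrative):

    #include <riscv_vector.h>

    vint8mf8_t avg_add(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
      // Overloaded form; lowers to @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64 with a poison passthru.
      return vaadd(op1, op2, vl);
    }
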
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
index 3342a8e257e05..09925c02c1ed3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1741,7 +1741,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1750,7 +1750,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1759,7 +1759,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1768,7 +1768,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1849,7 +1849,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuin
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1858,7 +1858,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1876,7 +1876,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
index 8d42ca1e5f592..ae66906d8dba6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
@@ -16,7 +16,7 @@ vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
@@ -26,7 +26,7 @@ vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
@@ -36,7 +36,7 @@ vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
@@ -46,7 +46,7 @@ vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
@@ -56,7 +56,7 @@ vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin,
@@ -66,7 +66,7 @@ vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
@@ -76,7 +76,7 @@ vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin,
@@ -86,7 +86,7 @@ vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
@@ -96,7 +96,7 @@ vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin,
@@ -106,7 +106,7 @@ vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
@@ -116,7 +116,7 @@ vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin,
@@ -126,7 +126,7 @@ vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
@@ -136,7 +136,7 @@ vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin,
@@ -146,7 +146,7 @@ vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
@@ -156,7 +156,7 @@ vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
@@ -166,7 +166,7 @@ vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
@@ -176,7 +176,7 @@ vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
@@ -186,7 +186,7 @@ vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
@@ -196,7 +196,7 @@ vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin,
@@ -206,7 +206,7 @@ vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
@@ -216,7 +216,7 @@ vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin,
@@ -226,7 +226,7 @@ vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
@@ -236,7 +236,7 @@ vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
@@ -246,7 +246,7 @@ vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
@@ -256,7 +256,7 @@ vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin,
@@ -266,7 +266,7 @@ vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
@@ -276,7 +276,7 @@ vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
@@ -286,7 +286,7 @@ vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
@@ -296,7 +296,7 @@ vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin,
@@ -306,7 +306,7 @@ vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
@@ -316,7 +316,7 @@ vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin,
@@ -326,7 +326,7 @@ vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
@@ -336,7 +336,7 @@ vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin,
@@ -346,7 +346,7 @@ vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
@@ -356,7 +356,7 @@ vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin,
@@ -366,7 +366,7 @@ vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
@@ -376,7 +376,7 @@ vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin,
@@ -386,7 +386,7 @@ vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
@@ -396,7 +396,7 @@ vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin,
@@ -406,7 +406,7 @@ vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
@@ -416,7 +416,7 @@ vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin,
@@ -426,7 +426,7 @@ vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
@@ -436,7 +436,7 @@ vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
@@ -446,7 +446,7 @@ vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -456,7 +456,7 @@ vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
@@ -466,7 +466,7 @@ vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -476,7 +476,7 @@ vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
@@ -486,7 +486,7 @@ vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
@@ -496,7 +496,7 @@ vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
@@ -506,7 +506,7 @@ vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
@@ -516,7 +516,7 @@ vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
@@ -526,7 +526,7 @@ vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
@@ -536,7 +536,7 @@ vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
@@ -546,7 +546,7 @@ vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
@@ -556,7 +556,7 @@ vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
@@ -566,7 +566,7 @@ vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
@@ -576,7 +576,7 @@ vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
@@ -586,7 +586,7 @@ vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -596,7 +596,7 @@ vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
@@ -606,7 +606,7 @@ vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -616,7 +616,7 @@ vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
@@ -626,7 +626,7 @@ vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
@@ -636,7 +636,7 @@ vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
@@ -646,7 +646,7 @@ vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
@@ -656,7 +656,7 @@ vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
@@ -666,7 +666,7 @@ vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
@@ -676,7 +676,7 @@ vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
@@ -686,7 +686,7 @@ vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
@@ -696,7 +696,7 @@ vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
@@ -706,7 +706,7 @@ vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -716,7 +716,7 @@ vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
@@ -726,7 +726,7 @@ vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
@@ -736,7 +736,7 @@ vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
@@ -746,7 +746,7 @@ vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
@@ -756,7 +756,7 @@ vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
@@ -766,7 +766,7 @@ vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
@@ -776,7 +776,7 @@ vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
@@ -786,7 +786,7 @@ vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
@@ -796,7 +796,7 @@ vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
@@ -806,7 +806,7 @@ vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
@@ -816,7 +816,7 @@ vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
@@ -826,7 +826,7 @@ vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
@@ -836,7 +836,7 @@ vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
@@ -846,7 +846,7 @@ vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
@@ -856,7 +856,7 @@ vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
@@ -866,7 +866,7 @@ vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
@@ -876,7 +876,7 @@ vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin,
@@ -922,7 +922,7 @@ vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) {
@@ -931,7 +931,7 @@ vint32mf2_t test_vadc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) {
@@ -940,7 +940,7 @@ vint32mf2_t test_vadc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t carr
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) {
@@ -949,7 +949,7 @@ vuint32mf2_t test_vadc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadc_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
index 5929c0b9685aa..f56e7c7fca93b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -43,7 +43,7 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -52,7 +52,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -61,7 +61,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -70,7 +70,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -79,7 +79,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -88,7 +88,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -97,7 +97,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -106,7 +106,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -115,7 +115,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -124,7 +124,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -133,7 +133,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -142,7 +142,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -151,7 +151,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -160,7 +160,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -169,7 +169,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -178,7 +178,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -187,7 +187,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -196,7 +196,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -205,7 +205,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -214,7 +214,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -223,7 +223,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -232,7 +232,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -241,7 +241,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -250,7 +250,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -259,7 +259,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -268,7 +268,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -277,7 +277,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -286,7 +286,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -295,7 +295,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -304,7 +304,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -313,7 +313,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -322,7 +322,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -331,7 +331,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -340,7 +340,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -349,7 +349,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -358,7 +358,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -367,7 +367,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -376,7 +376,7 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -385,7 +385,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -394,7 +394,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -403,7 +403,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -412,7 +412,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -421,7 +421,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -430,7 +430,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -439,7 +439,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -448,7 +448,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -457,7 +457,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -466,7 +466,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -475,7 +475,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -484,7 +484,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -493,7 +493,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -502,7 +502,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -511,7 +511,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -520,7 +520,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -529,7 +529,7 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -538,7 +538,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -556,7 +556,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -574,7 +574,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -583,7 +583,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -592,7 +592,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -601,7 +601,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -610,7 +610,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -619,7 +619,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -628,7 +628,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -637,7 +637,7 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -646,7 +646,7 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -655,7 +655,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -664,7 +664,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -673,7 +673,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -682,7 +682,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -691,7 +691,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -700,7 +700,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -709,7 +709,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -718,7 +718,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -727,7 +727,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -736,7 +736,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -745,7 +745,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -754,7 +754,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -763,7 +763,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -772,7 +772,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -781,7 +781,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -790,7 +790,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1627,7 +1627,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vv_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -1636,7 +1636,7 @@ vint8mf8_t test_vadd_vv_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vx_i8mf8_ta(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -1645,7 +1645,7 @@ vint8mf8_t test_vadd_vx_i8mf8_ta(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vv_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1654,7 +1654,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vx_u8mf8_ta(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1735,7 +1735,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vv_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -1744,7 +1744,7 @@ vint8mf8_t test_vadd_vv_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -1753,7 +1753,7 @@ vint8mf8_t test_vadd_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, int8_t op2, s
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vv_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1762,7 +1762,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vx_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {

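(A minimal standalone sketch of what these hunks change, using the same
nxv1i8 vadd intrinsic signature that appears above; the function name
@sketch and its comments are illustrative, not part of the patch. The
first operand of the unmasked intrinsic is the passthru value; since no
previous register contents need to be preserved here, the placeholder
can be poison rather than undef, which gives the optimizer strictly
more freedom because poison carries no "some consistent bit pattern"
guarantee.)

  ; Sketch: unmasked vadd with a poison passthru, as in the new CHECK lines.
  define <vscale x 1 x i8> @sketch(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, i64 %vl) {
  entry:
    %r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(
             <vscale x 1 x i8> poison,   ; passthru: no merge semantics needed
             <vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, i64 %vl)
    ret <vscale x 1 x i8> %r
  }
  declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64)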
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
index 177a47bd59733..f7ddd01e78e5f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vand_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vand_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vand_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint3
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vand_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vand_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vand_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

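In each @llvm.riscv.* call above, the first vector argument is the passthru (merge) operand; the unmasked and "_ta"/"_tama" forms leave it unspecified, which this change models as poison instead of undef (the trailing i64 3 in the masked "_tama" hunks is the tail/mask-agnostic policy). The function bodies elided by the hunks all share the same one-line shape. A minimal sketch, assuming the overloaded spelling these tests use (the function name here is hypothetical, not from the diff):

  #include <stddef.h>
  #include <riscv_vector.h>

  // The overloaded form takes no explicit passthru argument, so Clang
  // materializes one in the IR call itself; after this change it is
  // <vscale x 1 x i32> poison rather than undef.
  vint32mf2_t example_vand(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
    return vand(op1, op2, vl);
  }
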
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
index 3334860c16858..1b9608f30deaf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1741,7 +1741,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1750,7 +1750,7 @@ vint32mf2_t test_vasub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1759,7 +1759,7 @@ vint32mf2_t test_vasub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1768,7 +1768,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1849,7 +1849,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuin
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1858,7 +1858,7 @@ vint32mf2_t test_vasub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vasub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1876,7 +1876,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
index 5697b4bc12e92..36e439e25fb5c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

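The pattern is identical throughout: only the passthrough (merge) operand of each intrinsic call changes. As a quick illustration -- a minimal sketch, not taken from the patch, with %a, %b, and %vl as placeholder values -- a tail-agnostic vdivu never reads its passthrough, so poison, the strongest "don't care" value, replaces undef and gives the optimizer at least as much latitude:

; Sketch: tail-agnostic unsigned divide with no merge value. Lanes beyond
; vl are agnostic, so the passthrough operand is never observed.
%r = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(
    <vscale x 1 x i32> poison,  ; passthrough: unobserved, so poison is fine
    <vscale x 1 x i32> %a,      ; op1
    <vscale x 1 x i32> %b,      ; op2
    i64 %vl)                    ; active vector length
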
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
index 4bf73fa752e23..1f9c4e853fef5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -89,7 +89,7 @@ vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -98,7 +98,7 @@ vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -107,7 +107,7 @@ vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -116,7 +116,7 @@ vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -125,7 +125,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -134,7 +134,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -143,7 +143,7 @@ vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -152,7 +152,7 @@ vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -161,7 +161,7 @@ vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -170,7 +170,7 @@ vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -233,7 +233,7 @@ vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -242,7 +242,7 @@ vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -251,7 +251,7 @@ vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -260,7 +260,7 @@ vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -269,7 +269,7 @@ vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -566,7 +566,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -620,7 +620,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
index 19cc4d332029d..1de85c6c6b3a6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x i32> poison, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x i32> poison, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x i32> poison, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x i32> poison, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x i64> poison, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x i64> poison, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x i64> poison, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x i64> poison, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t merge, vfloat32mf2_t op1, siz
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2_ta(vfloat32mf2_t op1, size_t vl) {
@@ -214,7 +214,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vflo
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {

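The masked .mask. intrinsics in the _tama tests above carry a trailing policy operand, i64 3 (tail agnostic and mask agnostic), so their merge operand is likewise never observed and becomes poison as well. A minimal sketch, not from the patch, with %src, %m, and %vl as placeholders:

; Sketch: masked vfclass under the tail-agnostic, mask-agnostic policy.
%r = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(
    <vscale x 1 x i32> poison,    ; merge: unobserved under TAMA
    <vscale x 1 x float> %src,    ; source operand
    <vscale x 1 x i1> %m,         ; mask
    i64 %vl,                      ; active vector length
    i64 3)                        ; policy: tail agnostic | mask agnostic
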
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
index 91105d1815153..bdd47a2d3c563 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
@@ -16,7 +16,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
@@ -25,7 +25,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
@@ -34,7 +34,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
@@ -43,7 +43,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
@@ -52,7 +52,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
@@ -61,7 +61,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
@@ -70,7 +70,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
@@ -79,7 +79,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
@@ -88,7 +88,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
@@ -97,7 +97,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
@@ -106,7 +106,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
@@ -115,7 +115,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
@@ -124,7 +124,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
@@ -133,7 +133,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
@@ -142,7 +142,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
@@ -151,7 +151,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
@@ -160,7 +160,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
@@ -169,7 +169,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
@@ -178,7 +178,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
@@ -187,7 +187,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
@@ -196,7 +196,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
@@ -205,7 +205,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
@@ -214,7 +214,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
@@ -223,7 +223,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
@@ -232,7 +232,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
@@ -241,7 +241,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
@@ -250,7 +250,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
@@ -259,7 +259,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
@@ -268,7 +268,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
@@ -277,7 +277,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
@@ -286,7 +286,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
@@ -295,7 +295,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
@@ -304,7 +304,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
@@ -313,7 +313,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
@@ -322,7 +322,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
@@ -331,7 +331,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
@@ -340,7 +340,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
@@ -349,7 +349,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
@@ -358,7 +358,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
@@ -367,7 +367,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
@@ -376,7 +376,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
@@ -385,7 +385,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
@@ -394,7 +394,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
@@ -403,7 +403,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
@@ -412,7 +412,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
@@ -421,7 +421,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
@@ -430,7 +430,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
@@ -439,7 +439,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
@@ -448,7 +448,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
@@ -457,7 +457,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
@@ -466,7 +466,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
@@ -475,7 +475,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
@@ -484,7 +484,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
@@ -1089,7 +1089,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t merge, vuint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) {
@@ -1098,7 +1098,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) {
@@ -1107,7 +1107,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) {
@@ -1116,7 +1116,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) {
@@ -1125,7 +1125,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_ta(vint32mf2_t src, size_t vl) {
@@ -1134,7 +1134,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_ta(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_ta(vuint32mf2_t src, size_t vl) {
@@ -1251,7 +1251,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge,
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1260,7 +1260,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1269,7 +1269,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1278,7 +1278,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src, si
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1287,7 +1287,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
@@ -1296,7 +1296,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tama(vbool64_t mask, vint32mf2_t src, size
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {

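Likewise, a hedged sketch for the vfcvt tests above (again assuming the
overloaded spelling these files use, with a hypothetical wrapper name):
vfcvt_x converts float elements to signed integers, and with no merge operand
the passthrough slot of llvm.riscv.vfcvt.x.f.v is likewise poison after this
change:

#include <riscv_vector.h>

// Hypothetical example, not part of the patch: the unmasked
// float-to-int conversion supplies no passthrough, so the emitted
// IR call receives poison in that operand position.
vint32mf2_t to_int(vfloat32mf2_t src, size_t vl) {
  return vfcvt_x(src, vl);
}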
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
index 21a7d5af53da2..93a393899c59c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -385,7 +385,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -394,7 +394,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -439,7 +439,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -448,7 +448,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
index 38497916964db..53f92ec9b4756 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -385,7 +385,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -394,7 +394,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -439,7 +439,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -448,7 +448,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
index 306c14055ab5d..cb40c4f615f1f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -385,7 +385,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -394,7 +394,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -439,7 +439,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -448,7 +448,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
index 764757c207fe1..218b1e6abf046 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -385,7 +385,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -394,7 +394,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -439,7 +439,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -448,7 +448,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
index bead163eb253a..e5de4acab46f7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
@@ -17,7 +17,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
@@ -26,7 +26,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
@@ -35,7 +35,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
@@ -44,7 +44,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
@@ -53,7 +53,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
@@ -62,7 +62,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
@@ -71,7 +71,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
@@ -80,7 +80,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
@@ -89,7 +89,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
@@ -98,7 +98,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
@@ -107,7 +107,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
@@ -116,7 +116,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
@@ -125,7 +125,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
@@ -134,7 +134,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
@@ -143,7 +143,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
@@ -152,7 +152,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
@@ -161,7 +161,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
@@ -170,7 +170,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
@@ -179,7 +179,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
@@ -188,7 +188,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
@@ -197,7 +197,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
@@ -206,7 +206,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
@@ -215,7 +215,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
@@ -224,7 +224,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
@@ -233,7 +233,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
@@ -242,7 +242,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
@@ -251,7 +251,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
@@ -260,7 +260,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
@@ -269,7 +269,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
@@ -278,7 +278,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
@@ -287,7 +287,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
@@ -296,7 +296,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
@@ -305,7 +305,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
@@ -314,7 +314,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
@@ -323,7 +323,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
@@ -332,7 +332,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
@@ -341,7 +341,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
@@ -350,7 +350,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
@@ -359,7 +359,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
@@ -368,7 +368,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
@@ -377,7 +377,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
@@ -386,7 +386,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
@@ -395,7 +395,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
@@ -404,7 +404,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
@@ -413,7 +413,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
@@ -422,7 +422,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
@@ -431,7 +431,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
@@ -449,7 +449,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
@@ -458,7 +458,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
@@ -467,7 +467,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
@@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1093,7 +1093,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1102,7 +1102,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1111,7 +1111,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1120,7 +1120,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_ta(vint32mf2_t src, size_t vl) {
@@ -1129,7 +1129,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_ta(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_ta(vuint32mf2_t src, size_t vl) {
@@ -1138,7 +1138,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_ta(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1147,7 +1147,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1300,7 +1300,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t me
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1309,7 +1309,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1318,7 +1318,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1327,7 +1327,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t src, s
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1336,7 +1336,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t sr
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
@@ -1345,7 +1345,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tama(vbool64_t mask, vint32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
@@ -1354,7 +1354,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tama(vbool64_t mask, vuint32mf2_t src, s
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1363,7 +1363,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tama(vbool64_t mask, vfloat32mf2_t src, s
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {

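The unmasked conversions in vfncvt.c above follow the same pattern: with no merge source, the destination operand of the IR intrinsic is pure scratch, and `poison` states that more precisely than `undef`. A minimal sketch under the same assumptions as before (overloaded spelling, body not taken from the patch):

#include <riscv_vector.h>

// Narrow f32 elements to i16; per the CHECK-RV64 lines above this is
// expected to lower to @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64
// with a poison passthrough, the source vector, and the AVL.
vint16mf4_t narrow(vfloat32mf2_t src, size_t vl) {
  return vfncvt_x(src, vl);  // assumed overloaded spelling
}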
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
index 4c82a7fb9fab4..f4fc413a52f8c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -187,7 +187,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, f
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrdiv_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -214,7 +214,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vf
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrdiv_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

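vfrdiv is the reverse-division form, dividing a scalar by each vector element, so the scalar rides along as a plain `float` or `double` operand in the IR call. A sketch with the same caveats as above:

#include <riscv_vector.h>

// Computes op2 / op1[i] elementwise; per the CHECK-RV64 lines above
// this lowers to @llvm.riscv.vfrdiv.nxv1f64.f64.i64 with a poison
// passthrough, the vector operand, the scalar double, and the AVL.
vfloat64m1_t rdiv(vfloat64m1_t op1, double op2, size_t vl) {
  return vfrdiv(op1, op2, vl);  // assumed overloaded spelling
}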
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
index de8d434df12b7..f4a9ddf81046d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, si
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrec7_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
@@ -214,7 +214,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrec7_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
index 2a7b9e3b71641..18918841bff8e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
@@ -206,7 +206,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
@@ -224,7 +224,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merg
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
index f566d18c5771c..f844a5f8fcb45 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
@@ -206,7 +206,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
@@ -224,7 +224,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merg
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
index 75143020657b4..c757e97315c01 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
@@ -404,7 +404,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
@@ -422,7 +422,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mer
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
@@ -458,7 +458,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mer
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
index f672d59ca79e8..17bef917dc558 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1,
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsqrt7_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
@@ -214,7 +214,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, v
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
index 4740865d99a38..f2c35fd6670d6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -187,7 +187,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, f
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsub_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -214,7 +214,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vf
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
index 188326db31dbf..aae5b61bca774 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -178,7 +178,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -188,7 +188,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -207,7 +207,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -216,7 +216,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -226,7 +226,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -235,7 +235,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -245,7 +245,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -254,7 +254,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -264,7 +264,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -273,7 +273,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -292,7 +292,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -302,7 +302,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -311,7 +311,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -321,7 +321,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -330,7 +330,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -340,7 +340,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -349,7 +349,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -359,7 +359,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -378,7 +378,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -387,7 +387,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -397,7 +397,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -406,7 +406,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -416,7 +416,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -425,7 +425,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -435,7 +435,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -444,7 +444,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -454,7 +454,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -463,7 +463,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -473,7 +473,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -482,7 +482,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -492,7 +492,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -501,7 +501,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -511,7 +511,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -1143,7 +1143,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1152,7 +1152,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, siz
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1161,7 +1161,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1170,7 +1170,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, si
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1179,7 +1179,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1188,7 +1188,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, si
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1305,7 +1305,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, v
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1314,7 +1314,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vflo
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1323,7 +1323,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, floa
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1332,7 +1332,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfl
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1341,7 +1341,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, flo
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1350,7 +1350,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfl
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
index 3f0a7ec666908..ba7e1767bec96 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
@@ -27,7 +27,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
@@ -37,7 +37,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
@@ -47,7 +47,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
@@ -57,7 +57,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
@@ -67,7 +67,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
@@ -77,7 +77,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
@@ -87,7 +87,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1down_vf_f32mf2_ta(vfloat32mf2_t src, float value, size_t vl) {
@@ -238,7 +238,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t mer
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1down_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
index ec60e62b78175..d661d6f5863e9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
@@ -27,7 +27,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
@@ -37,7 +37,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
@@ -47,7 +47,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
@@ -57,7 +57,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
@@ -67,7 +67,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
@@ -77,7 +77,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
@@ -87,7 +87,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
@@ -206,7 +206,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t sr
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1up_vf_f32mf2_ta(vfloat32mf2_t src, float value, size_t vl) {
@@ -233,7 +233,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1up_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
index 0c93dbe3f3c59..aa14974b7027b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, si
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsqrt_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
@@ -214,7 +214,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsqrt_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
index af104e69c56e4..8020a1763f078 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -93,7 +93,7 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -159,7 +159,7 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -169,7 +169,7 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -385,7 +385,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -394,7 +394,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -439,7 +439,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -448,7 +448,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

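For context, every unmasked test in these files follows the same shape: a tail-agnostic overloaded call with no merge operand, whose IR passthrough is exactly the operand this patch switches from undef to poison. A minimal C sketch, using the overloaded vfsub spelling the test file itself exercises; the comment shows the expected lowering:

#include <riscv_vector.h>

// Tail-agnostic, unmasked: the C API has no merge operand, so the
// IR-level passthrough operand is now poison rather than undef.
vfloat64m4_t sub_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
  // Lowers to @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, op1, op2, vl)
  return vfsub(op1, op2, vl);
}

Since nothing may rely on the inactive or tail elements here, poison is the stronger, more optimizer-friendly placeholder than undef.
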
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
index 5d24d5737398e..08b5de5adb825 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -36,7 +36,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -55,7 +55,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -74,7 +74,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -93,7 +93,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -112,7 +112,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -131,7 +131,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
@@ -363,7 +363,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, float
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -372,7 +372,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -381,7 +381,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -390,7 +390,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wf_f64m1_ta(vfloat64m1_t op1, float op2, size_t vl) {
@@ -471,7 +471,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloa
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -480,7 +480,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -489,7 +489,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -498,7 +498,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat3
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wf_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) {

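The widening vfwadd forms above follow the same rule; only the passthrough's element type changes, since the result (and therefore the merge operand) has the widened type. A short sketch, assuming the suffixed overloaded names (vfwadd_vv, vfwadd_wv) that the tests call:

#include <riscv_vector.h>

// vv form: two single-width (f32) sources, double-width (f64) result, so
// the poison passthrough in the IR call is <vscale x 1 x double>.
vfloat64m1_t widen_vv(vfloat32mf2_t a, vfloat32mf2_t b, size_t vl) {
  return vfwadd_vv(a, b, vl);
}

// wv form: the first source is already wide; the second stays narrow.
vfloat64m1_t widen_wv(vfloat64m1_t a, vfloat32mf2_t b, size_t vl) {
  return vfwadd_wv(a, b, vl);
}
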
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
index 206556d44cc48..138582d28b6ba 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
@@ -16,7 +16,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
@@ -25,7 +25,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
@@ -34,7 +34,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
@@ -43,7 +43,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
@@ -52,7 +52,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
@@ -61,7 +61,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
@@ -70,7 +70,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
@@ -79,7 +79,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
@@ -88,7 +88,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
@@ -97,7 +97,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
@@ -106,7 +106,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
@@ -115,7 +115,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
@@ -124,7 +124,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
@@ -133,7 +133,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
@@ -142,7 +142,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
@@ -151,7 +151,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
@@ -160,7 +160,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
@@ -169,7 +169,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
@@ -178,7 +178,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
@@ -187,7 +187,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
@@ -196,7 +196,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
@@ -205,7 +205,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
@@ -214,7 +214,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
@@ -223,7 +223,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
@@ -232,7 +232,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
@@ -241,7 +241,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
@@ -250,7 +250,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
@@ -259,7 +259,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
@@ -268,7 +268,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
@@ -277,7 +277,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
@@ -286,7 +286,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
@@ -295,7 +295,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
@@ -304,7 +304,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
@@ -313,7 +313,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
@@ -322,7 +322,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
@@ -331,7 +331,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
@@ -340,7 +340,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
@@ -797,7 +797,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t src, s
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) {
@@ -806,7 +806,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) {
@@ -815,7 +815,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) {
@@ -824,7 +824,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) {
@@ -833,7 +833,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_x_v_f64m1_ta(vint32mf2_t src, size_t vl) {
@@ -842,7 +842,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_ta(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_ta(vuint32mf2_t src, size_t vl) {
@@ -851,7 +851,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_ta(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_f_v_f64m1_ta(vfloat32mf2_t src, size_t vl) {
@@ -986,7 +986,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vf
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -995,7 +995,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1004,7 +1004,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, s
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1013,7 +1013,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1022,7 +1022,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
@@ -1031,7 +1031,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tama(vbool64_t mask, vint32mf2_t src, size_
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
@@ -1040,7 +1040,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tama(vbool64_t mask, vuint32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {

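The masked _tama tests above show the other place the passthrough appears: the .mask intrinsic variants, which also carry a trailing policy immediate (i64 3 encodes tail-agnostic, mask-agnostic). Since neither the tail nor the inactive elements are defined under that policy, poison is the natural merge value. A hedged sketch; the _tama overloaded spelling is assumed to match the test functions above:

#include <riscv_vector.h>

// Tail-agnostic + mask-agnostic: no meaningful merge value exists, so the
// masked intrinsic takes a poison passthrough plus policy operand 3.
vint64m1_t cvt_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
  // Lowers to @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(poison, src, mask, vl, 3)
  return vfwcvt_x_tama(mask, src, vl);
}
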
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
index de89639d36dab..8ceeb39e44990 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -185,7 +185,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, floa
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -194,7 +194,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -239,7 +239,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloa
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -248,7 +248,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

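A note on the trailing i64 3 in the masked calls above: it is the vector
policy operand. Assuming the RISCVII encoding used by the backend
(TAIL_AGNOSTIC = 1, MASK_AGNOSTIC = 2), the value 3 requests tail-agnostic
and mask-agnostic behavior, so neither tail elements nor inactive elements
of the result are taken from the passthru operand:

  /* sketch of the assumed policy bits; names follow RISCVII */
  enum { TAIL_AGNOSTIC = 1, MASK_AGNOSTIC = 2 };
  /* policy operand of the _tama variants: TAIL_AGNOSTIC | MASK_AGNOSTIC == 3 */

With both policies agnostic, the passthru is a pure placeholder, which is
exactly what poison expresses.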
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
index 92312e889e728..f7cf04e8b8efe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -236,7 +236,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu (vfloat64m1_t merge, vfloat32mf2
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_ta (vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
@@ -254,7 +254,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum (vbool64_t mask, vfloat64m1_t m
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
@@ -272,7 +272,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t me
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {

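The reduction intrinsics above differ slightly: vfwredusum is the unordered
widening sum and vfwredosum the ordered one, and only element 0 of the
result is defined (it receives the accumulated sum seeded by the scalar
operand), so everything else is tail. A usage sketch, assuming the
non-overloaded spelling mirrors the test names:

  // illustrative only; r[0] = scalar[0] + widened sum of vector[0..vl-1],
  // remaining elements of r are unspecified under the _ta policy
  vfloat64m1_t r = vfwredusum_vs_f32mf2_f64m1_ta(vector, scalar, vl);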
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
index ba0b51c4f23b2..1fb2f3f8a5520 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -36,7 +36,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -55,7 +55,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -74,7 +74,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -93,7 +93,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -112,7 +112,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -131,7 +131,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
@@ -363,7 +363,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, float
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -372,7 +372,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -381,7 +381,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -390,7 +390,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wf_f64m1_ta(vfloat64m1_t op1, float op2, size_t vl) {
@@ -471,7 +471,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloa
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -480,7 +480,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -489,7 +489,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -498,7 +498,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat3
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wf_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) {

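The vloxei changes below follow the same pattern for indexed-ordered loads:
the unmasked form still carries a passthru from which only tail elements
could come. A usage sketch, assuming the non-overloaded spelling mirrors the
test names; the index vector holds byte offsets from base:

  // illustrative only; v[i] = base[bindex[i]] for i < vl (8-bit elements,
  // so byte offsets and element indices coincide); tail is unspecified
  vint8mf8_t v = vloxei8_v_i8mf8(base, bindex, vl);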
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
index 725b868b06176..71d0fc3cac88b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -61,7 +61,7 @@ vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -70,7 +70,7 @@ vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -79,7 +79,7 @@ vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -88,7 +88,7 @@ vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -97,7 +97,7 @@ vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -106,7 +106,7 @@ vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -115,7 +115,7 @@ vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -124,7 +124,7 @@ vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -133,7 +133,7 @@ vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -142,7 +142,7 @@ vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -151,7 +151,7 @@ vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -160,7 +160,7 @@ vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -169,7 +169,7 @@ vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -178,7 +178,7 @@ vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -187,7 +187,7 @@ vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -196,7 +196,7 @@ vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
@@ -205,7 +205,7 @@ vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -214,7 +214,7 @@ vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -223,7 +223,7 @@ vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -232,7 +232,7 @@ vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -241,7 +241,7 @@ vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -250,7 +250,7 @@ vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -259,7 +259,7 @@ vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -268,7 +268,7 @@ vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -277,7 +277,7 @@ vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -286,7 +286,7 @@ vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -295,7 +295,7 @@ vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -304,7 +304,7 @@ vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -313,7 +313,7 @@ vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -322,7 +322,7 @@ vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -331,7 +331,7 @@ vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -340,7 +340,7 @@ vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -349,7 +349,7 @@ vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -358,7 +358,7 @@ vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -367,7 +367,7 @@ vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -376,7 +376,7 @@ vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -385,7 +385,7 @@ vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -394,7 +394,7 @@ vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -403,7 +403,7 @@ vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -412,7 +412,7 @@ vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -421,7 +421,7 @@ vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -430,7 +430,7 @@ vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -439,7 +439,7 @@ vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -448,7 +448,7 @@ vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -457,7 +457,7 @@ vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -466,7 +466,7 @@ vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -475,7 +475,7 @@ vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -484,7 +484,7 @@ vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -493,7 +493,7 @@ vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -502,7 +502,7 @@ vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -511,7 +511,7 @@ vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
@@ -520,7 +520,7 @@ vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -529,7 +529,7 @@ vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -538,7 +538,7 @@ vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -547,7 +547,7 @@ vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -556,7 +556,7 @@ vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -565,7 +565,7 @@ vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -574,7 +574,7 @@ vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -583,7 +583,7 @@ vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -592,7 +592,7 @@ vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -601,7 +601,7 @@ vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -610,7 +610,7 @@ vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -619,7 +619,7 @@ vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -628,7 +628,7 @@ vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -637,7 +637,7 @@ vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -646,7 +646,7 @@ vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -655,7 +655,7 @@ vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -664,7 +664,7 @@ vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -673,7 +673,7 @@ vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -682,7 +682,7 @@ vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -691,7 +691,7 @@ vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -700,7 +700,7 @@ vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -709,7 +709,7 @@ vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -718,7 +718,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -727,7 +727,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -736,7 +736,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -745,7 +745,7 @@ vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
@@ -754,7 +754,7 @@ vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -763,7 +763,7 @@ vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -772,7 +772,7 @@ vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -781,7 +781,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -790,7 +790,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -799,7 +799,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -808,7 +808,7 @@ vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -817,7 +817,7 @@ vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -826,7 +826,7 @@ vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -835,7 +835,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -844,7 +844,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -853,7 +853,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -862,7 +862,7 @@ vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -871,7 +871,7 @@ vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -880,7 +880,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -889,7 +889,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -898,7 +898,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
@@ -907,7 +907,7 @@ vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -916,7 +916,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -925,7 +925,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -934,7 +934,7 @@ vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -943,7 +943,7 @@ vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -952,7 +952,7 @@ vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -961,7 +961,7 @@ vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -970,7 +970,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -979,7 +979,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -988,7 +988,7 @@ vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -997,7 +997,7 @@ vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -1006,7 +1006,7 @@ vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -1015,7 +1015,7 @@ vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1024,7 +1024,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1033,7 +1033,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1042,7 +1042,7 @@ vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1051,7 +1051,7 @@ vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -1060,7 +1060,7 @@ vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1069,7 +1069,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1078,7 +1078,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1088,7 +1088,7 @@ vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1097,7 +1097,7 @@ vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1106,7 +1106,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1115,7 +1115,7 @@ vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1124,7 +1124,7 @@ vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1133,7 +1133,7 @@ vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -1142,7 +1142,7 @@ vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1151,7 +1151,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1160,7 +1160,7 @@ vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1169,7 +1169,7 @@ vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1178,7 +1178,7 @@ vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -1187,7 +1187,7 @@ vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1196,7 +1196,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1205,7 +1205,7 @@ vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1214,7 +1214,7 @@ vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1223,7 +1223,7 @@ vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -1232,7 +1232,7 @@ vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1241,7 +1241,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1250,7 +1250,7 @@ vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1259,7 +1259,7 @@ vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1268,7 +1268,7 @@ vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1277,7 +1277,7 @@ vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1286,7 +1286,7 @@ vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1295,7 +1295,7 @@ vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1304,7 +1304,7 @@ vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1313,7 +1313,7 @@ vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1322,7 +1322,7 @@ vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1331,7 +1331,7 @@ vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1340,7 +1340,7 @@ vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1349,7 +1349,7 @@ vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1358,7 +1358,7 @@ vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1367,7 +1367,7 @@ vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1376,7 +1376,7 @@ vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1385,7 +1385,7 @@ vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1394,7 +1394,7 @@ vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1403,7 +1403,7 @@ vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1412,7 +1412,7 @@ vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
@@ -1421,7 +1421,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
@@ -1430,7 +1430,7 @@ vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
@@ -1439,7 +1439,7 @@ vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
@@ -1448,7 +1448,7 @@ vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
@@ -1457,7 +1457,7 @@ vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
@@ -1466,7 +1466,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
@@ -1475,7 +1475,7 @@ vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
@@ -1484,7 +1484,7 @@ vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
@@ -1493,7 +1493,7 @@ vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
@@ -1502,7 +1502,7 @@ vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
@@ -1511,7 +1511,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
@@ -1520,7 +1520,7 @@ vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
@@ -1529,7 +1529,7 @@ vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
@@ -1538,7 +1538,7 @@ vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
@@ -1547,7 +1547,7 @@ vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
@@ -1556,7 +1556,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
@@ -1565,7 +1565,7 @@ vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
@@ -1574,7 +1574,7 @@ vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
@@ -1583,7 +1583,7 @@ vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
@@ -1592,7 +1592,7 @@ vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
@@ -1601,7 +1601,7 @@ vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
@@ -1610,7 +1610,7 @@ vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
@@ -1619,7 +1619,7 @@ vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
@@ -1628,7 +1628,7 @@ vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
@@ -1637,7 +1637,7 @@ vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
@@ -1646,7 +1646,7 @@ vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
@@ -1655,7 +1655,7 @@ vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
@@ -1664,7 +1664,7 @@ vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
@@ -1673,7 +1673,7 @@ vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
@@ -1682,7 +1682,7 @@ vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
@@ -1691,7 +1691,7 @@ vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
@@ -1700,7 +1700,7 @@ vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
@@ -1709,7 +1709,7 @@ vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
@@ -1718,7 +1718,7 @@ vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
@@ -3473,7 +3473,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t merge, const float *base,
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei32_v_i32mf2_ta(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3482,7 +3482,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_ta(const int32_t *base, vuint32mf2_t bindex,
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei32_v_u32mf2_ta(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3491,7 +3491,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_ta(const uint32_t *base, vuint32mf2_t bindex
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei32_v_f32mf2_ta(const float *base, vuint32mf2_t bindex, size_t vl) {
@@ -3554,7 +3554,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, c
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei32_v_i32mf2_tama(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3563,7 +3563,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_tama(vbool64_t mask, const int32_t *base, vui
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei32_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3572,7 +3572,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, v
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei32_v_f32mf2_tama(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {

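Every hunk shown here follows the same one-operand pattern: the passthru (merge) argument of the RVV load intrinsic changes from undef to poison, while the pointer, index vector, mask, VL, and policy operands are untouched. Distilled from the CHECK lines above into a minimal before/after sketch (concrete SSA names substituted for the FileCheck patterns; this is an illustration, not part of the generated test output):

    ; before: tail elements of the result are seeded from an undef passthru
    %v = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(
            <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %bindex, i64 %vl)

    ; after: poison, the stronger "no value" placeholder, states explicitly
    ; that nothing may be assumed about the tail elements of the loaded vector
    %v = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(
            <vscale x 1 x i8> poison, ptr %base, <vscale x 1 x i8> %bindex, i64 %vl)

(%v is reused above only for side-by-side comparison; real IR assigns each SSA name once.)
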
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
index 103b19df6b0f4..57ef76b9d6cae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -61,7 +61,7 @@ vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -70,7 +70,7 @@ vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -79,7 +79,7 @@ vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -88,7 +88,7 @@ vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -97,7 +97,7 @@ vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -106,7 +106,7 @@ vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -115,7 +115,7 @@ vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -124,7 +124,7 @@ vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -133,7 +133,7 @@ vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -142,7 +142,7 @@ vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -151,7 +151,7 @@ vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -160,7 +160,7 @@ vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -169,7 +169,7 @@ vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -178,7 +178,7 @@ vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -187,7 +187,7 @@ vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -196,7 +196,7 @@ vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
@@ -205,7 +205,7 @@ vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -214,7 +214,7 @@ vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -223,7 +223,7 @@ vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -232,7 +232,7 @@ vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -241,7 +241,7 @@ vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -250,7 +250,7 @@ vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -259,7 +259,7 @@ vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -268,7 +268,7 @@ vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -277,7 +277,7 @@ vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -286,7 +286,7 @@ vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -295,7 +295,7 @@ vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -304,7 +304,7 @@ vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -313,7 +313,7 @@ vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -322,7 +322,7 @@ vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -331,7 +331,7 @@ vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -340,7 +340,7 @@ vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -349,7 +349,7 @@ vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -358,7 +358,7 @@ vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -367,7 +367,7 @@ vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -376,7 +376,7 @@ vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -385,7 +385,7 @@ vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -394,7 +394,7 @@ vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -403,7 +403,7 @@ vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -412,7 +412,7 @@ vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -421,7 +421,7 @@ vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -430,7 +430,7 @@ vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -439,7 +439,7 @@ vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -448,7 +448,7 @@ vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -457,7 +457,7 @@ vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -466,7 +466,7 @@ vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -475,7 +475,7 @@ vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -484,7 +484,7 @@ vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -493,7 +493,7 @@ vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -502,7 +502,7 @@ vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -511,7 +511,7 @@ vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
@@ -520,7 +520,7 @@ vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -529,7 +529,7 @@ vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -538,7 +538,7 @@ vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -547,7 +547,7 @@ vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -556,7 +556,7 @@ vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -565,7 +565,7 @@ vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -574,7 +574,7 @@ vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -583,7 +583,7 @@ vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -592,7 +592,7 @@ vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -601,7 +601,7 @@ vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -610,7 +610,7 @@ vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -619,7 +619,7 @@ vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -628,7 +628,7 @@ vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -637,7 +637,7 @@ vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -646,7 +646,7 @@ vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -655,7 +655,7 @@ vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -664,7 +664,7 @@ vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -673,7 +673,7 @@ vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -682,7 +682,7 @@ vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -691,7 +691,7 @@ vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -700,7 +700,7 @@ vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -709,7 +709,7 @@ vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -718,7 +718,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -727,7 +727,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -736,7 +736,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -745,7 +745,7 @@ vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
@@ -754,7 +754,7 @@ vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -763,7 +763,7 @@ vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -772,7 +772,7 @@ vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -781,7 +781,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -790,7 +790,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -799,7 +799,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -808,7 +808,7 @@ vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -817,7 +817,7 @@ vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -826,7 +826,7 @@ vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -835,7 +835,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -844,7 +844,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -853,7 +853,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -862,7 +862,7 @@ vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -871,7 +871,7 @@ vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -880,7 +880,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -889,7 +889,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -898,7 +898,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
@@ -907,7 +907,7 @@ vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -916,7 +916,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -925,7 +925,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -934,7 +934,7 @@ vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -943,7 +943,7 @@ vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -952,7 +952,7 @@ vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -961,7 +961,7 @@ vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -970,7 +970,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -979,7 +979,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -988,7 +988,7 @@ vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -997,7 +997,7 @@ vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -1006,7 +1006,7 @@ vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -1015,7 +1015,7 @@ vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1024,7 +1024,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1033,7 +1033,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1042,7 +1042,7 @@ vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1051,7 +1051,7 @@ vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -1060,7 +1060,7 @@ vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1069,7 +1069,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1078,7 +1078,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1087,7 +1087,7 @@ vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1096,7 +1096,7 @@ vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1105,7 +1105,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1114,7 +1114,7 @@ vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1123,7 +1123,7 @@ vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1132,7 +1132,7 @@ vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -1141,7 +1141,7 @@ vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1150,7 +1150,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1159,7 +1159,7 @@ vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1168,7 +1168,7 @@ vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1177,7 +1177,7 @@ vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -1186,7 +1186,7 @@ vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1195,7 +1195,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1204,7 +1204,7 @@ vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1213,7 +1213,7 @@ vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1222,7 +1222,7 @@ vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -1231,7 +1231,7 @@ vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1240,7 +1240,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1249,7 +1249,7 @@ vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1258,7 +1258,7 @@ vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1267,7 +1267,7 @@ vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1276,7 +1276,7 @@ vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1285,7 +1285,7 @@ vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1294,7 +1294,7 @@ vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1303,7 +1303,7 @@ vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1312,7 +1312,7 @@ vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1321,7 +1321,7 @@ vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1330,7 +1330,7 @@ vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1339,7 +1339,7 @@ vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1348,7 +1348,7 @@ vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1357,7 +1357,7 @@ vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1366,7 +1366,7 @@ vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1375,7 +1375,7 @@ vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1384,7 +1384,7 @@ vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1393,7 +1393,7 @@ vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1402,7 +1402,7 @@ vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1411,7 +1411,7 @@ vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
@@ -1420,7 +1420,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
@@ -1429,7 +1429,7 @@ vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
@@ -1438,7 +1438,7 @@ vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
@@ -1447,7 +1447,7 @@ vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
@@ -1456,7 +1456,7 @@ vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
@@ -1465,7 +1465,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
@@ -1474,7 +1474,7 @@ vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
@@ -1483,7 +1483,7 @@ vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
@@ -1492,7 +1492,7 @@ vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
@@ -1501,7 +1501,7 @@ vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
@@ -1510,7 +1510,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
@@ -1519,7 +1519,7 @@ vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
@@ -1528,7 +1528,7 @@ vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
@@ -1537,7 +1537,7 @@ vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
@@ -1546,7 +1546,7 @@ vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
@@ -1555,7 +1555,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
@@ -1564,7 +1564,7 @@ vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
@@ -1573,7 +1573,7 @@ vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
@@ -1582,7 +1582,7 @@ vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
@@ -1591,7 +1591,7 @@ vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
@@ -1600,7 +1600,7 @@ vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
@@ -1609,7 +1609,7 @@ vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
@@ -1618,7 +1618,7 @@ vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
@@ -1627,7 +1627,7 @@ vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
@@ -1636,7 +1636,7 @@ vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
@@ -1645,7 +1645,7 @@ vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
@@ -1654,7 +1654,7 @@ vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
@@ -1663,7 +1663,7 @@ vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
@@ -1672,7 +1672,7 @@ vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
@@ -1681,7 +1681,7 @@ vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
@@ -1690,7 +1690,7 @@ vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
@@ -1699,7 +1699,7 @@ vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
@@ -1708,7 +1708,7 @@ vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
@@ -1717,7 +1717,7 @@ vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
@@ -3473,7 +3473,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t merge, const float *base,
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei32_v_i32mf2_ta(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3482,7 +3482,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_ta(const int32_t *base, vuint32mf2_t bindex,
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei32_v_u32mf2_ta(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3491,7 +3491,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_ta(const uint32_t *base, vuint32mf2_t bindex
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei32_v_f32mf2_ta(const float *base, vuint32mf2_t bindex, size_t vl) {
@@ -3554,7 +3554,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, c
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei32_v_i32mf2_tama(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3563,7 +3563,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_tama(vbool64_t mask, const int32_t *base, vui
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei32_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3572,7 +3572,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, v
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei32_v_f32mf2_tama(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {

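The rewrite is the same mechanical pattern in every hunk of this patch: the passthru (tail) operand of each tail-agnostic RVV intrinsic call changes from undef to poison, for both the unmasked intrinsics and the masked .mask variants with agnostic policy (i64 3) shown in the _tama tests above. A minimal before/after sketch of the pattern, taken from one of the vluxei hunks above (the concrete intrinsic name and vector types vary per test):

  ; before: the don't-care passthru operand is undef
  %v0 = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i32> %bindex, i64 %vl)
  ; after: poison, which gives the optimizer at least as much freedom as undef
  %v1 = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, ptr %base, <vscale x 1 x i32> %bindex, i64 %vl)

Since the tail elements are agnostic, nothing is merged from the passthru, so poison is a safe and strictly stronger choice than undef here.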
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
index c0f7b97ef8ce8..1e28d73e9ae20 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vmax_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vmax_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vmax_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vmax_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
index 511e79b462996..366f5a1119728 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
@@ -364,7 +364,7 @@ vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -373,7 +373,7 @@ vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfeq_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
index 49593a1518e93..7388f27001433 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
@@ -332,7 +332,7 @@ vbool8_t test_vmfge_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
 
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -341,7 +341,7 @@ vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfge_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
index 5bdfe28d931d1..eb8ca1798e32b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
@@ -332,7 +332,7 @@ vbool8_t test_vmfgt_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
 
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -341,7 +341,7 @@ vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfgt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
index 90df740028a90..a00a88478217f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
@@ -364,7 +364,7 @@ vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -373,7 +373,7 @@ vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfle_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
index de183e3a7af35..7ffc357139fa6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
@@ -364,7 +364,7 @@ vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -373,7 +373,7 @@ vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmflt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
index ddba4a196dd75..63a3bb5da1e3e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
@@ -364,7 +364,7 @@ vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -373,7 +373,7 @@ vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfne_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
index 5410969742442..1b8703ca06284 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vmin_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vmin_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vmin_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vmin_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
index 56e4974259904..5e591e9f95a32 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
@@ -125,7 +125,7 @@ vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmsbf_m_b4_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> poison, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
 vbool4_t test_vmsbf_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
index 7d6a5c3c882e3..b39efb2d75c21 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
@@ -1702,7 +1702,7 @@ vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1711,7 +1711,7 @@ vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1720,7 +1720,7 @@ vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1729,7 +1729,7 @@ vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmseq_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
index 7166360d64e96..2a41180844bf2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
@@ -1591,7 +1591,7 @@ vbool8_t test_vmsgeu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
 
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1600,7 +1600,7 @@ vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1609,7 +1609,7 @@ vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1618,7 +1618,7 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgeu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
index fb3a1f2f7d6fa..bdb04552f8995 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
@@ -1591,7 +1591,7 @@ vbool8_t test_vmsgtu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
 
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1600,7 +1600,7 @@ vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1609,7 +1609,7 @@ vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1618,7 +1618,7 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgtu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
index f8101c4d57c68..09e185a5a7a28 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
@@ -125,7 +125,7 @@ vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmsif_m_b4_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> poison, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
 vbool4_t test_vmsif_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
index db6b5d35536dd..10499ac8f9ed3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
@@ -1717,7 +1717,7 @@ vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1726,7 +1726,7 @@ vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1735,7 +1735,7 @@ vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1744,7 +1744,7 @@ vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsleu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
index 35887bc6e639a..c5a9028bde8a8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
@@ -1717,7 +1717,7 @@ vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1726,7 +1726,7 @@ vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1735,7 +1735,7 @@ vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1744,7 +1744,7 @@ vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsltu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
index f9b095fcbf973..7be2e340f3a1f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
@@ -1702,7 +1702,7 @@ vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1711,7 +1711,7 @@ vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1720,7 +1720,7 @@ vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1729,7 +1729,7 @@ vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsne_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
index e24ffe32f7ede..a925d1672caa6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
@@ -125,7 +125,7 @@ vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmsof_m_b4_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> poison, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
 vbool4_t test_vmsof_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
index 4a010385d1405..5f50abb4a75e2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
@@ -9,7 +9,7 @@
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -18,7 +18,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -27,7 +27,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -36,7 +36,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -45,7 +45,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -54,7 +54,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -63,7 +63,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -72,7 +72,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -81,7 +81,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -90,7 +90,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -99,7 +99,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -108,7 +108,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -117,7 +117,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -126,7 +126,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -135,7 +135,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -144,7 +144,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -153,7 +153,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -162,7 +162,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
@@ -171,7 +171,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -180,7 +180,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
@@ -207,7 +207,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -216,7 +216,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
index 3e8d384b09ca4..eb21b0d9413a5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1590,7 +1590,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1599,7 +1599,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1608,7 +1608,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1617,7 +1617,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1662,7 +1662,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1671,7 +1671,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1680,7 +1680,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1689,7 +1689,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1698,7 +1698,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1716,7 +1716,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1725,7 +1725,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
@@ -3624,7 +3624,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, uint32
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -3633,7 +3633,7 @@ vint32mf2_t test_vmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -3642,7 +3642,7 @@ vint32mf2_t test_vmul_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3651,7 +3651,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -3660,7 +3660,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -3669,7 +3669,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -3678,7 +3678,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3687,7 +3687,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -3696,7 +3696,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3705,7 +3705,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vx_i32mf2_ta(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -3894,7 +3894,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint3
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -3903,7 +3903,7 @@ vint32mf2_t test_vmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -3912,7 +3912,7 @@ vint32mf2_t test_vmul_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3921,7 +3921,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -3930,7 +3930,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -3939,7 +3939,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -3948,7 +3948,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3957,7 +3957,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -3966,7 +3966,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3975,7 +3975,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
index 6817a439fa720..6e30585778bef 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
@@ -34,35 +34,35 @@ vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) { return vmv_v(src, vl); }
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) { return vmv_v(src, vl); }
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) { return vmv_v(src, vl); }
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) { return vmv_v(src, vl); }
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
@@ -71,7 +71,7 @@ vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
@@ -80,7 +80,7 @@ vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
@@ -89,7 +89,7 @@ vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
@@ -98,7 +98,7 @@ vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
@@ -107,7 +107,7 @@ vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
@@ -116,7 +116,7 @@ vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
@@ -125,7 +125,7 @@ vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
@@ -134,7 +134,7 @@ vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
@@ -143,7 +143,7 @@ vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
@@ -152,7 +152,7 @@ vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
@@ -161,7 +161,7 @@ vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
@@ -170,7 +170,7 @@ vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
@@ -179,7 +179,7 @@ vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
@@ -188,7 +188,7 @@ vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
@@ -197,7 +197,7 @@ vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
@@ -206,7 +206,7 @@ vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
@@ -215,7 +215,7 @@ vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
@@ -224,7 +224,7 @@ vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
@@ -233,7 +233,7 @@ vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
@@ -242,7 +242,7 @@ vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
@@ -251,7 +251,7 @@ vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
@@ -260,7 +260,7 @@ vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
@@ -269,7 +269,7 @@ vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
@@ -278,7 +278,7 @@ vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
@@ -287,7 +287,7 @@ vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
@@ -296,7 +296,7 @@ vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
@@ -305,7 +305,7 @@ vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
@@ -314,7 +314,7 @@ vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
@@ -323,7 +323,7 @@ vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
@@ -332,7 +332,7 @@ vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
@@ -341,7 +341,7 @@ vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
@@ -350,7 +350,7 @@ vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
@@ -359,7 +359,7 @@ vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
@@ -368,7 +368,7 @@ vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
@@ -377,7 +377,7 @@ vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
@@ -386,7 +386,7 @@ vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
@@ -395,7 +395,7 @@ vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
@@ -404,7 +404,7 @@ vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
@@ -413,7 +413,7 @@ vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
@@ -422,7 +422,7 @@ vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
@@ -431,7 +431,7 @@ vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
@@ -449,7 +449,7 @@ vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
@@ -458,7 +458,7 @@ vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
@@ -467,7 +467,7 @@ vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
@@ -1216,7 +1216,7 @@ vuint32mf2_t test_vmv_v_x_u32mf2_tu(vuint32mf2_t merge, uint32_t src, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmv_v_v_i32mf2_ta(vint32mf2_t src, size_t vl) {
@@ -1225,7 +1225,7 @@ vint32mf2_t test_vmv_v_v_i32mf2_ta(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmv_v_v_u32mf2_ta(vuint32mf2_t src, size_t vl) {
@@ -1243,7 +1243,7 @@ vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vmv_v_v_f32mf2_ta(vfloat32mf2_t src, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
index 131e1d0848a9d..dd89ebedf8a0a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
@@ -124,7 +124,7 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -133,7 +133,7 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
@@ -143,7 +143,7 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
@@ -152,7 +152,7 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -161,7 +161,7 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
@@ -216,7 +216,7 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
@@ -225,7 +225,7 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -234,7 +234,7 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
@@ -243,7 +243,7 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -252,7 +252,7 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
@@ -289,7 +289,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -298,7 +298,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
@@ -308,7 +308,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -317,7 +317,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
@@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
@@ -400,7 +400,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -409,7 +409,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
@@ -419,7 +419,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -428,7 +428,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
@@ -438,7 +438,7 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -447,7 +447,7 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
@@ -457,7 +457,7 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -466,7 +466,7 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
@@ -476,7 +476,7 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -485,7 +485,7 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
@@ -495,7 +495,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -504,7 +504,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
@@ -514,7 +514,7 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -523,7 +523,7 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
@@ -533,7 +533,7 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -542,7 +542,7 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
@@ -552,7 +552,7 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -1227,7 +1227,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, size
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1236,7 +1236,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wx_i32mf2_ta(vint64m1_t op1, size_t shift, size_t vl) {
@@ -1245,7 +1245,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_ta(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1254,7 +1254,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wx_u32mf2_ta(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -1335,7 +1335,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vui
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1344,7 +1344,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wx_i32mf2_tama(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
@@ -1353,7 +1353,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_tama(vbool64_t mask, vint64m1_t op1, size_t sh
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1362,7 +1362,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wx_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {

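For readers skimming the hunks above: distilled into a standalone sketch, the unmasked call shape after this change looks as follows. Only the intrinsic name and signature are taken from the test expectations; the wrapper function and value names are illustrative. Poison is the natural passthrough for these unmasked, tail-agnostic calls: the operand carries no meaningful value, and unlike undef, poison propagates through dependent computation, so it constrains the optimizer strictly less.

; Minimal sketch (illustrative wrapper; intrinsic signature from the hunks above).
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(
  <vscale x 1 x i32>, <vscale x 1 x i64>, <vscale x 1 x i32>, i64)

define <vscale x 1 x i32> @sketch_unmasked(<vscale x 1 x i64> %op1,
                                           <vscale x 1 x i32> %shift, i64 %vl) {
entry:
  ; Passthrough (operand 0) is now poison: no tail value needs to be preserved.
  %0 = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(
         <vscale x 1 x i32> poison, <vscale x 1 x i64> %op1,
         <vscale x 1 x i32> %shift, i64 %vl)
  ret <vscale x 1 x i32> %0
}
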
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
index cf45bfa1fe790..3d960da97b0f1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
@@ -564,7 +564,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t merge, vint64m1_t op1, size_t sh
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -573,7 +573,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t v
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wx_i32mf2_ta(vint64m1_t op1, size_t shift, size_t vl) {
@@ -618,7 +618,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint64m
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -627,7 +627,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf2
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wx_i32mf2_tama(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {

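The _tama tests above exercise the masked form; a comparable sketch of that shape is below (again, only the intrinsic signature comes from the hunks, the rest is illustrative). The trailing i64 3 is the policy operand, matching the _tama suffix: tail-agnostic and mask-agnostic, which is precisely the case where the passthrough operand is dead and poison is the most precise encoding.

declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64(
  <vscale x 1 x i32>, <vscale x 1 x i64>, <vscale x 1 x i32>,
  <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i32> @sketch_tama(<vscale x 1 x i1> %mask,
                                       <vscale x 1 x i64> %op1,
                                       <vscale x 1 x i32> %shift, i64 %vl) {
entry:
  ; Operand 0 is poison: under tail-agnostic/mask-agnostic policy (i64 3),
  ; neither tail nor inactive elements keep the passthrough value.
  %0 = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64(
         <vscale x 1 x i32> poison, <vscale x 1 x i64> %op1,
         <vscale x 1 x i32> %shift, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret <vscale x 1 x i32> %0
}
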
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
index f170acaff5e3e..6fa5a29d21fbf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -564,7 +564,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, size_t
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -573,7 +573,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size_t
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wx_u32mf2_ta(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -618,7 +618,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -627,7 +627,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wx_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {

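As to why poison rather than undef: the two are not interchangeable in general. Undef may resolve to a different value at each use, while poison propagates through dependent computation until stopped by a freeze. A generic illustration, unrelated to the RVV intrinsics and purely to show the semantic difference:

define i32 @undef_vs_poison(i32 %x) {
entry:
  ; Any result data-dependent on poison is itself poison ...
  %p = add i32 %x, poison
  ; ... until freeze pins it to some fixed but arbitrary i32.
  %f = freeze i32 %p
  ret i32 %f
}
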
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
index 0f5eba3b353b2..bcf66d9b4bcc6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
index b3a89e7227325..de65b46b93cb6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vect
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge,
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
index dd2c2a314e1d3..46edae298cebd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vec
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge,
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
index a9b9d15a53046..faed7baa45d82 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vec
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge,
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
index 30f69f1e81e68..6e7096cbd5c23 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vecto
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar,
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, v
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, v
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
index d57f4339445aa..4be88fea63fbb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vect
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredsum_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge,
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
index 75440bfe76156..100a0c91aa6a9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vect
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge,
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
index 25c3d95f4a778..46c1d045ff3c7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vrem_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vrem_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vrem_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vrem_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
index 306f1d90194a6..dfb37498277e0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
@@ -17,7 +17,7 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
@@ -26,7 +26,7 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
@@ -36,7 +36,7 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
@@ -45,7 +45,7 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
@@ -55,7 +55,7 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
@@ -64,7 +64,7 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
@@ -73,7 +73,7 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
@@ -82,7 +82,7 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
@@ -91,7 +91,7 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
@@ -100,7 +100,7 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -109,7 +109,7 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
@@ -118,7 +118,7 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -127,7 +127,7 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
@@ -136,7 +136,7 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
@@ -146,7 +146,7 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
@@ -155,7 +155,7 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
@@ -165,7 +165,7 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
@@ -174,7 +174,7 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
@@ -184,7 +184,7 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
@@ -193,7 +193,7 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
@@ -203,7 +203,7 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
@@ -212,7 +212,7 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
@@ -222,7 +222,7 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
@@ -231,7 +231,7 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
@@ -241,7 +241,7 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
@@ -250,7 +250,7 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
@@ -260,7 +260,7 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
@@ -269,7 +269,7 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
@@ -279,7 +279,7 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
@@ -288,7 +288,7 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
@@ -298,7 +298,7 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
@@ -307,7 +307,7 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
@@ -317,7 +317,7 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
@@ -326,7 +326,7 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
@@ -336,7 +336,7 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
@@ -345,7 +345,7 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
@@ -355,7 +355,7 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
@@ -364,7 +364,7 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
@@ -374,7 +374,7 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
@@ -383,7 +383,7 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
@@ -393,7 +393,7 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
@@ -402,7 +402,7 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
@@ -412,7 +412,7 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
@@ -421,7 +421,7 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
@@ -431,7 +431,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
@@ -440,7 +440,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
@@ -450,7 +450,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
@@ -459,7 +459,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
@@ -469,7 +469,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
@@ -478,7 +478,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
@@ -487,7 +487,7 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
@@ -496,7 +496,7 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
@@ -505,7 +505,7 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
@@ -514,7 +514,7 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -523,7 +523,7 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
@@ -532,7 +532,7 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -541,7 +541,7 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
@@ -550,7 +550,7 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
@@ -560,7 +560,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
@@ -570,7 +570,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
@@ -580,7 +580,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
@@ -590,7 +590,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
@@ -600,7 +600,7 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
@@ -619,7 +619,7 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
@@ -628,7 +628,7 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
@@ -638,7 +638,7 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
@@ -647,7 +647,7 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
@@ -657,7 +657,7 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
@@ -666,7 +666,7 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
@@ -676,7 +676,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
@@ -686,7 +686,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
@@ -696,7 +696,7 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
@@ -705,7 +705,7 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
@@ -715,7 +715,7 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
@@ -724,7 +724,7 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
@@ -734,7 +734,7 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
@@ -743,7 +743,7 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
@@ -753,7 +753,7 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
@@ -762,7 +762,7 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
@@ -772,7 +772,7 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
@@ -781,7 +781,7 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
@@ -791,7 +791,7 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
@@ -800,7 +800,7 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
@@ -810,7 +810,7 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
@@ -819,7 +819,7 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
@@ -829,7 +829,7 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
@@ -838,7 +838,7 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
@@ -848,7 +848,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
@@ -858,7 +858,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
@@ -868,7 +868,7 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
@@ -877,7 +877,7 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
@@ -887,7 +887,7 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
@@ -896,7 +896,7 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
@@ -906,7 +906,7 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
@@ -915,7 +915,7 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
@@ -925,7 +925,7 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
@@ -934,7 +934,7 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
@@ -944,7 +944,7 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
@@ -953,7 +953,7 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
@@ -963,7 +963,7 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
@@ -972,7 +972,7 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
@@ -982,7 +982,7 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
@@ -991,7 +991,7 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
@@ -1001,7 +1001,7 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
@@ -1010,7 +1010,7 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
@@ -1020,7 +1020,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
@@ -1030,7 +1030,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
@@ -1040,7 +1040,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
@@ -1049,7 +1049,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
@@ -1058,7 +1058,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
@@ -1077,7 +1077,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
@@ -1087,7 +1087,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
@@ -1097,7 +1097,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
@@ -1107,7 +1107,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
@@ -1117,7 +1117,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
@@ -1127,7 +1127,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
@@ -1137,7 +1137,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
@@ -1147,7 +1147,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
@@ -1157,7 +1157,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
@@ -1167,7 +1167,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
@@ -1177,7 +1177,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
@@ -1187,7 +1187,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
@@ -1197,7 +1197,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
@@ -1207,7 +1207,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
@@ -1217,7 +1217,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
@@ -1227,7 +1227,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
@@ -1237,7 +1237,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
@@ -1247,7 +1247,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
@@ -1257,7 +1257,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
@@ -1267,7 +1267,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
@@ -1277,7 +1277,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -1287,7 +1287,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -1297,7 +1297,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
@@ -1307,7 +1307,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
@@ -1317,7 +1317,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
@@ -1327,7 +1327,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
@@ -1337,7 +1337,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -1347,7 +1347,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
@@ -1357,7 +1357,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
@@ -1367,7 +1367,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
@@ -1377,7 +1377,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
@@ -1387,7 +1387,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
@@ -1397,7 +1397,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
@@ -1407,7 +1407,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
@@ -1417,7 +1417,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
@@ -1427,7 +1427,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
@@ -1437,7 +1437,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
@@ -1447,7 +1447,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
@@ -1457,7 +1457,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
@@ -1467,7 +1467,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
@@ -1477,7 +1477,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
@@ -1487,7 +1487,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
@@ -1497,7 +1497,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
@@ -1507,7 +1507,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2,
@@ -3292,7 +3292,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3301,7 +3301,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t index, size
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vx_i32mf2_ta(vint32mf2_t op1, size_t index, size_t vl) {
@@ -3310,7 +3310,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_ta(vint32mf2_t op1, size_t index, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3319,7 +3319,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t index, si
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vx_u32mf2_ta(vuint32mf2_t op1, size_t index, size_t vl) {
@@ -3328,7 +3328,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_ta(vuint32mf2_t op1, size_t index, size_t v
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vv_f32mf2_ta(vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3337,7 +3337,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_ta(vfloat32mf2_t op1, vuint32mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vx_f32mf2_ta(vfloat32mf2_t op1, size_t index, size_t vl) {
@@ -3346,7 +3346,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_ta(vfloat32mf2_t op1, size_t index, size_t
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgatherei16_vv_i32mf2_ta(vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3355,7 +3355,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_ta(vint32mf2_t op1, vuint16mf4_t op2, si
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgatherei16_vv_u32mf2_ta(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3364,7 +3364,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_ta(vuint32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgatherei16_vv_f32mf2_ta(vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3535,7 +3535,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t mer
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3544,7 +3544,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t index, size_t vl) {
@@ -3553,7 +3553,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3562,7 +3562,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t index, size_t vl) {
@@ -3571,7 +3571,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3580,7 +3580,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vu
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t index, size_t vl) {
@@ -3589,7 +3589,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, si
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgatherei16_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3598,7 +3598,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vu
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgatherei16_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3607,7 +3607,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
index 9f0c8a8928288..a37e626187b00 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrsub_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrsub_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

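For the masked _tama forms just above, the expected IR additionally carries the policy operand i64 3 (tail-agnostic, mask-agnostic), so both the tail and the inactive lanes are unconstrained and a poison passthrough states that directly. A minimal sketch of such a call site, assuming the _tama overload spelling these tests use:

#include <riscv_vector.h>

// Masked vrsub under the TAMA policy: the lowered IR call pairs a
// poison passthrough with policy operand 3, so neither tail nor
// masked-off elements inherit a defined value.
vint32mf2_t reverse_sub_masked(vbool64_t mask, vint32mf2_t op1,
                               int32_t op2, size_t vl) {
  return vrsub_tama(mask, op1, op2, vl);
}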
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
index d22b0372a748f..c99fd380e85ba 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1741,7 +1741,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1750,7 +1750,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1759,7 +1759,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1768,7 +1768,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1849,7 +1849,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuin
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1858,7 +1858,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1876,7 +1876,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
index d9a39039420f1..7655c07730268 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
@@ -16,7 +16,7 @@ vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin,
@@ -26,7 +26,7 @@ vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
@@ -36,7 +36,7 @@ vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin,
@@ -46,7 +46,7 @@ vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
@@ -56,7 +56,7 @@ vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin,
@@ -66,7 +66,7 @@ vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
@@ -76,7 +76,7 @@ vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
@@ -86,7 +86,7 @@ vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
@@ -96,7 +96,7 @@ vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
@@ -106,7 +106,7 @@ vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
@@ -116,7 +116,7 @@ vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
@@ -126,7 +126,7 @@ vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
@@ -136,7 +136,7 @@ vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
@@ -146,7 +146,7 @@ vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
@@ -156,7 +156,7 @@ vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
@@ -166,7 +166,7 @@ vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
@@ -176,7 +176,7 @@ vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
@@ -186,7 +186,7 @@ vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
@@ -196,7 +196,7 @@ vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin,
@@ -206,7 +206,7 @@ vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2,
@@ -216,7 +216,7 @@ vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
@@ -226,7 +226,7 @@ vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2,
@@ -236,7 +236,7 @@ vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
@@ -246,7 +246,7 @@ vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2,
@@ -256,7 +256,7 @@ vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
@@ -266,7 +266,7 @@ vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
@@ -276,7 +276,7 @@ vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
@@ -286,7 +286,7 @@ vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
@@ -296,7 +296,7 @@ vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin,
@@ -306,7 +306,7 @@ vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
@@ -316,7 +316,7 @@ vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin,
@@ -326,7 +326,7 @@ vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2,
@@ -336,7 +336,7 @@ vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
@@ -346,7 +346,7 @@ vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2,
@@ -356,7 +356,7 @@ vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
@@ -366,7 +366,7 @@ vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
@@ -376,7 +376,7 @@ vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin,
@@ -386,7 +386,7 @@ vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
@@ -396,7 +396,7 @@ vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin,
@@ -406,7 +406,7 @@ vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
@@ -416,7 +416,7 @@ vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin,
@@ -426,7 +426,7 @@ vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2,
@@ -436,7 +436,7 @@ vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
@@ -446,7 +446,7 @@ vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -456,7 +456,7 @@ vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2,
@@ -466,7 +466,7 @@ vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -476,7 +476,7 @@ vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2,
@@ -486,7 +486,7 @@ vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
@@ -496,7 +496,7 @@ vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2,
@@ -506,7 +506,7 @@ vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin,
@@ -516,7 +516,7 @@ vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
@@ -526,7 +526,7 @@ vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin,
@@ -536,7 +536,7 @@ vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
@@ -546,7 +546,7 @@ vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin,
@@ -556,7 +556,7 @@ vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
@@ -566,7 +566,7 @@ vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin,
@@ -576,7 +576,7 @@ vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
@@ -586,7 +586,7 @@ vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -596,7 +596,7 @@ vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
@@ -606,7 +606,7 @@ vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -616,7 +616,7 @@ vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
@@ -626,7 +626,7 @@ vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
@@ -636,7 +636,7 @@ vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
@@ -646,7 +646,7 @@ vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
@@ -656,7 +656,7 @@ vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2,
@@ -666,7 +666,7 @@ vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
@@ -676,7 +676,7 @@ vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2,
@@ -686,7 +686,7 @@ vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
@@ -696,7 +696,7 @@ vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2,
@@ -706,7 +706,7 @@ vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -716,7 +716,7 @@ vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
@@ -726,7 +726,7 @@ vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
@@ -736,7 +736,7 @@ vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
@@ -746,7 +746,7 @@ vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
@@ -756,7 +756,7 @@ vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
@@ -766,7 +766,7 @@ vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
@@ -776,7 +776,7 @@ vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2,
@@ -786,7 +786,7 @@ vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
@@ -796,7 +796,7 @@ vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2,
@@ -806,7 +806,7 @@ vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
@@ -816,7 +816,7 @@ vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
@@ -826,7 +826,7 @@ vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
@@ -836,7 +836,7 @@ vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
@@ -846,7 +846,7 @@ vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
@@ -856,7 +856,7 @@ vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
@@ -866,7 +866,7 @@ vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
@@ -876,7 +876,7 @@ vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2,
@@ -922,7 +922,7 @@ vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsbc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) {
@@ -931,7 +931,7 @@ vint32mf2_t test_vsbc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsbc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) {
@@ -940,7 +940,7 @@ vint32mf2_t test_vsbc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t borr
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsbc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) {
@@ -949,7 +949,7 @@ vuint32mf2_t test_vsbc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsbc_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) {

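The first argument of every llvm.riscv.vsbc.* call above is the passthru (merge) operand. The plain, non-policy overloads exercised by these tests take no merge value at the C level, so the frontend fills that slot with a don't-care placeholder, which these hunks switch from undef to poison. A minimal sketch of the source pattern behind the CHECK lines, assuming the unprefixed overloaded intrinsic spelling used by these tests and typical flags such as -target riscv64 -march=rv64gcv -emit-llvm:

    #include <riscv_vector.h>

    // Overloaded subtract-with-borrow: no explicit merge operand at the C
    // level, so the emitted @llvm.riscv.vsbc.* intrinsic call carries a
    // don't-care (now poison) passthru as its first argument.
    vint32m1_t demo(vint32m1_t op1, vint32m1_t op2,
                    vbool32_t borrowin, size_t vl) {
      return vsbc(op1, op2, borrowin, vl);
    }
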
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
index 05672fb7f2cd4..545d0c408e0f4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
@@ -33,7 +33,7 @@ vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
@@ -42,7 +42,7 @@ vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
@@ -51,7 +51,7 @@ vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
@@ -69,7 +69,7 @@ vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
@@ -78,7 +78,7 @@ vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
@@ -87,7 +87,7 @@ vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
@@ -96,7 +96,7 @@ vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
@@ -105,7 +105,7 @@ vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
@@ -114,7 +114,7 @@ vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
@@ -123,7 +123,7 @@ vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
@@ -132,7 +132,7 @@ vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
@@ -141,7 +141,7 @@ vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
@@ -159,7 +159,7 @@ vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
@@ -168,7 +168,7 @@ vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
@@ -177,7 +177,7 @@ vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
@@ -186,7 +186,7 @@ vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
@@ -195,7 +195,7 @@ vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
@@ -204,7 +204,7 @@ vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
@@ -213,7 +213,7 @@ vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
@@ -222,7 +222,7 @@ vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
@@ -231,7 +231,7 @@ vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
@@ -249,7 +249,7 @@ vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
@@ -547,7 +547,7 @@ vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf2_i64m1_ta(vint32mf2_t op1, size_t vl) {
@@ -574,7 +574,7 @@ vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf2_i64m1_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {

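As with vsbc, the vsext changes are mechanical: vsext_vf2/vf4/vf8 sign-extend each element to 2x/4x/8x its width, and the plain tail-agnostic overloads supply no merge value, so the widened passthru operand becomes poison. A sketch of one such caller, assuming the overloaded spelling this file tests (helper name hypothetical):

  #include <riscv_vector.h>

  // Hypothetical helper: sign-extend each i8 element to i16.
  // The tail-agnostic overload has no merge argument, so the
  // <vscale x 1 x i16> passthru in the generated call is poison.
  vint16mf4_t widen2(vint8mf8_t op1, size_t vl) {
    return vsext_vf2(op1, vl);
  }
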
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
index 724acefde3e2f..5ffe3e9677aba 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
@@ -79,7 +79,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
@@ -89,7 +89,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
@@ -116,7 +116,7 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
@@ -125,7 +125,7 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
@@ -135,7 +135,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
@@ -144,7 +144,7 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
@@ -153,7 +153,7 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
@@ -162,7 +162,7 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
@@ -171,7 +171,7 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
@@ -180,7 +180,7 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
@@ -207,7 +207,7 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
@@ -217,7 +217,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
@@ -227,7 +227,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
@@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
@@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
@@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
@@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
@@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
@@ -283,7 +283,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
@@ -293,7 +293,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
@@ -303,7 +303,7 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
@@ -313,7 +313,7 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
@@ -323,7 +323,7 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
@@ -333,7 +333,7 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
@@ -343,7 +343,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
@@ -353,7 +353,7 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
@@ -363,7 +363,7 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
@@ -373,7 +373,7 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
@@ -383,7 +383,7 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
@@ -393,7 +393,7 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
@@ -403,7 +403,7 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
@@ -413,7 +413,7 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
@@ -924,7 +924,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1down_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t vl) {
@@ -933,7 +933,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1down_vx_u32mf2_ta(vuint32mf2_t src, uint32_t value, size_t vl) {
@@ -978,7 +978,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1down_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) {
@@ -987,7 +987,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1down_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) {

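The vslide1down changes follow suit: the instruction shifts every element down one index and inserts the scalar value at the highest element, and without a merge operand the passthru is poison. The *_tama variants additionally show the masked intrinsic carrying policy operand 3 (tail-agnostic, mask-agnostic) alongside the poison passthru. A hedged sketch of a plain caller (helper name hypothetical):

  #include <riscv_vector.h>

  // Hypothetical helper: slide src down one element and insert
  // `value` at the highest index. No merge operand is given, so
  // the passthru lowers to poison, as the CHECK lines above show.
  vint32mf2_t slide_in(vint32mf2_t src, int32_t value, size_t vl) {
    return vslide1down(src, value, vl);
  }
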
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
index 4d710a2f5e067..0a37ba90fd004 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
@@ -79,7 +79,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
@@ -89,7 +89,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
@@ -116,7 +116,7 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
@@ -125,7 +125,7 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
@@ -135,7 +135,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
@@ -144,7 +144,7 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
@@ -153,7 +153,7 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
@@ -162,7 +162,7 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
@@ -171,7 +171,7 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
@@ -180,7 +180,7 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
@@ -207,7 +207,7 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
@@ -216,7 +216,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
@@ -225,7 +225,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
@@ -234,7 +234,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
@@ -243,7 +243,7 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
@@ -252,7 +252,7 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
@@ -261,7 +261,7 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
@@ -270,7 +270,7 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
@@ -280,7 +280,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
@@ -290,7 +290,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
@@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
@@ -310,7 +310,7 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
@@ -320,7 +320,7 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
@@ -330,7 +330,7 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
@@ -340,7 +340,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
@@ -350,7 +350,7 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
@@ -360,7 +360,7 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
@@ -370,7 +370,7 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
@@ -380,7 +380,7 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
@@ -390,7 +390,7 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
@@ -400,7 +400,7 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
@@ -410,7 +410,7 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value,
@@ -899,7 +899,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, u
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1up_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t vl) {
@@ -908,7 +908,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t v
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1up_vx_u32mf2_ta(vuint32mf2_t src, uint32_t value, size_t vl) {
@@ -953,7 +953,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, v
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1up_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) {
@@ -962,7 +962,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int32
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1up_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
index d35c9b00fb2d8..4617d5ba259c4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vsll_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t v
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vsll_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint3
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vsll_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vsll_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shi
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
index 2d3fa6b498c48..9e996008cdded 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -26,7 +26,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -44,7 +44,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -62,7 +62,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
index de9a1918a0798..d02f512338d82 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32m
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {

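[Editor's note: for context, each test above exercises the overloaded RVV
intrinsics from <riscv_vector.h> and FileCheck-verifies the IR they lower to;
the only change in this patch is the expected passthru operand (undef ->
poison). A minimal sketch of the source pattern behind the vsmul checks,
assuming the pre-`__riscv_`-prefix overloaded spellings of this era:

    #include <riscv_vector.h>

    // Unmasked, tail-agnostic form: no destination value is carried in,
    // so the lowered llvm.riscv.vsmul call now takes `poison` (previously
    // `undef`) as its passthru operand.
    vint8m2_t smul_ta(vint8m2_t op1, vint8m2_t op2, size_t vl) {
      return vsmul(op1, op2, vl);
    }

Generated code is unchanged; poison simply gives the optimizer a stronger
guarantee that the passthru lanes are never observed.]
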
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
index 4d64df874a3aa..12caaf98a1d15 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
@@ -816,7 +816,7 @@ vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t sh
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -825,7 +825,7 @@ vint32mf2_t test_vsra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t v
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -870,7 +870,7 @@ vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -879,7 +879,7 @@ vint32mf2_t test_vsra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {

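[Editor's note: the hunk context above also shows why only the tail-agnostic
and _tama tests change: the _tu/_tumu variants take an explicit merge
argument, which becomes the passthru operand, so they have no undef to
replace. A sketch of the contrast using the vsra shapes from this file (the
_ta/_tu spellings are assumptions inferred from the test names):

    #include <riscv_vector.h>

    // Tail-agnostic: passthru is now poison in the emitted intrinsic call.
    vint32mf2_t sra_ta(vint32mf2_t op1, size_t shift, size_t vl) {
      return vsra_ta(op1, shift, vl);
    }

    // Tail-undisturbed: `merge` supplies the passthru, so these checks
    // keep a real vector operand and are untouched by this patch.
    vint32mf2_t sra_tu(vint32mf2_t merge, vint32mf2_t op1, size_t shift,
                       size_t vl) {
      return vsra_tu(merge, op1, shift, vl);
    }
]
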
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
index ff7af94d0d4f3..a108100400e49 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -816,7 +816,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -825,7 +825,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -870,7 +870,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint3
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -879,7 +879,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
index fd442fc20603a..f6161c4f28d46 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
@@ -866,7 +866,7 @@ vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t s
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -875,7 +875,7 @@ vint32mf2_t test_vssra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -920,7 +920,7 @@ vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32m
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -929,7 +929,7 @@ vint32mf2_t test_vssra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {

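(Editorial note on the pattern above: only calls whose destination is dead pick up the new operand. The non-policy and tail-agnostic overloads lower with a poison passthru, while the tail-undisturbed overloads keep their merge argument, which is why the _tu CHECK lines in these hunks are untouched. A minimal sketch of the two cases, mirroring the overloaded names used in these tests and assuming a V-enabled RV64 target as in the tests' RUN lines:

#include <riscv_vector.h>

// Destination is dead here, so the passthru operand of the generated
// @llvm.riscv.vssrl.nxv1i32.i64.i64 call is now poison (previously undef).
vuint32mf2_t shift_plain(vuint32mf2_t op1, size_t shift, size_t vl) {
  return vssrl(op1, shift, vl);
}

// Tail-undisturbed: merge is a live passthru that must survive past vl, so
// this lowering is unchanged by the patch.
vuint32mf2_t shift_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t shift,
                      size_t vl) {
  return vssrl_tu(merge, op1, shift, vl);
}
)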
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
index eccac98f07e16..9338c6d9fab22 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -881,7 +881,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -890,7 +890,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -935,7 +935,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -944,7 +944,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
index d0a49852b6892..b3413c42f1302 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1741,7 +1741,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1750,7 +1750,7 @@ vint32mf2_t test_vssub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1759,7 +1759,7 @@ vint32mf2_t test_vssub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1768,7 +1768,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1849,7 +1849,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuin
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1858,7 +1858,7 @@ vint32mf2_t test_vssub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vssub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1876,7 +1876,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
index 59e86900e2192..4505db9aa1685 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vsub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint3
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vsub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

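(Editorial sketch, not part of the commit: each file below repeats the same undef-to-poison update on the intrinsic's passthrough operand. As a minimal illustration of what these tests exercise, assuming the pre-`__riscv_`-prefix overloaded intrinsic spelling used by this test directory and a hypothetical function name, a tail-agnostic call passes no merge operand:

  #include <stddef.h>
  #include <riscv_vector.h>

  // Hypothetical example: with no merge operand, the destination's tail
  // elements carry no defined value, so the generated IR call to
  // @llvm.riscv.vwadd.* now takes a poison passthrough instead of undef.
  vint16mf4_t widen_add(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
    return vwadd_vv(op1, op2, vl);
  }

The updated CHECK lines below encode exactly that expectation.)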
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
index abbf6266a27fb..bb5e38e1619c1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -556,7 +556,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
@@ -575,7 +575,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -594,7 +594,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -603,7 +603,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
@@ -613,7 +613,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
@@ -622,7 +622,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -631,7 +631,7 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -640,7 +640,7 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
@@ -649,7 +649,7 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
@@ -658,7 +658,7 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -667,7 +667,7 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -676,7 +676,7 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
@@ -685,7 +685,7 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
@@ -694,7 +694,7 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -703,7 +703,7 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -712,7 +712,7 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
@@ -721,7 +721,7 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
@@ -730,7 +730,7 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -739,7 +739,7 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -748,7 +748,7 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
@@ -757,7 +757,7 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
@@ -766,7 +766,7 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -776,7 +776,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -785,7 +785,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -814,7 +814,7 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -823,7 +823,7 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
@@ -832,7 +832,7 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
@@ -841,7 +841,7 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -850,7 +850,7 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -859,7 +859,7 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
@@ -868,7 +868,7 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
@@ -877,7 +877,7 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -886,7 +886,7 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -895,7 +895,7 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
@@ -904,7 +904,7 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
@@ -913,7 +913,7 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -922,7 +922,7 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -931,7 +931,7 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
@@ -940,7 +940,7 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
@@ -949,7 +949,7 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -959,7 +959,7 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -968,7 +968,7 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -986,7 +986,7 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -995,7 +995,7 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1004,7 +1004,7 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
@@ -1013,7 +1013,7 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
@@ -1022,7 +1022,7 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1031,7 +1031,7 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
@@ -1049,7 +1049,7 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
@@ -1058,7 +1058,7 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1076,7 +1076,7 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,7 +1085,7 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
@@ -2394,7 +2394,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, uint32_t
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -2403,7 +2403,7 @@ vint64m1_t test_vwadd_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -2412,7 +2412,7 @@ vint64m1_t test_vwadd_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wv_i64m1_ta(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -2421,7 +2421,7 @@ vint64m1_t test_vwadd_wv_i64m1_ta(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wx_i64m1_ta(vint64m1_t op1, int32_t op2, size_t vl) {
@@ -2430,7 +2430,7 @@ vint64m1_t test_vwadd_wx_i64m1_ta(vint64m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2439,7 +2439,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -2448,7 +2448,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wv_u64m1_ta(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2457,7 +2457,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_ta(vuint64m1_t op1, vuint32mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wx_u64m1_ta(vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -2610,7 +2610,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -2619,7 +2619,7 @@ vint64m1_t test_vwadd_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -2628,7 +2628,7 @@ vint64m1_t test_vwadd_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -2637,7 +2637,7 @@ vint64m1_t test_vwadd_wv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) {
@@ -2646,7 +2646,7 @@ vint64m1_t test_vwadd_wx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int32_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2655,7 +2655,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -2664,7 +2664,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2673,7 +2673,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wx_u64m1_tama(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) {

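(The same mechanical substitution repeats across every element width and LMUL in this file: the passthrough operand of each unmasked or tail-agnostic RVV intrinsic call changes from undef to poison, with no other change to the checked IR. A minimal sketch of the C-level pattern these tests exercise, assuming the overloaded intrinsic spelling used by this test suite:

    #include <riscv_vector.h>

    // Unmasked widening add: no merge operand is supplied at the C
    // level, so the lowered @llvm.riscv.vwaddu call carries a poison
    // passthrough after this change (previously undef).
    vuint16m1_t widen_add_u16m1(vuint8mf2_t op1, vuint8mf2_t op2,
                                size_t vl) {
      return vwaddu_vv(op1, op2, vl);
    }

Since nothing can read the passthrough of an unmasked, tail-agnostic operation, poison is the more precise choice and gives the optimizer stronger guarantees than undef.)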
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
index b768a93db389f..20c3f7aeb7d65 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1680,7 +1680,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, uint32_t
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1689,7 +1689,7 @@ vint64m1_t test_vwmul_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1698,7 +1698,7 @@ vint64m1_t test_vwmul_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1716,7 +1716,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vv_i64m1_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1725,7 +1725,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vx_i64m1_ta(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1842,7 +1842,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1851,7 +1851,7 @@ vint64m1_t test_vwmul_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1860,7 +1860,7 @@ vint64m1_t test_vwmul_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1869,7 +1869,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1878,7 +1878,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1887,7 +1887,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
index 80f4ab527c032..e9be8e4da4e5c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
@@ -780,7 +780,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t ve
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
@@ -789,7 +789,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scala
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
@@ -816,7 +816,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t merge
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
@@ -825,7 +825,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
index 468a77068c676..f7c5c64228af0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -556,7 +556,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
@@ -575,7 +575,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -594,7 +594,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -603,7 +603,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
@@ -613,7 +613,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
@@ -622,7 +622,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -631,7 +631,7 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -640,7 +640,7 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
@@ -649,7 +649,7 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
@@ -658,7 +658,7 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -667,7 +667,7 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -676,7 +676,7 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
@@ -685,7 +685,7 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
@@ -694,7 +694,7 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -703,7 +703,7 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -712,7 +712,7 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
@@ -721,7 +721,7 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
@@ -730,7 +730,7 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -739,7 +739,7 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -748,7 +748,7 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
@@ -757,7 +757,7 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
@@ -766,7 +766,7 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -776,7 +776,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -785,7 +785,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -814,7 +814,7 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -823,7 +823,7 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
@@ -832,7 +832,7 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
@@ -841,7 +841,7 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -850,7 +850,7 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -859,7 +859,7 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
@@ -868,7 +868,7 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
@@ -877,7 +877,7 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -886,7 +886,7 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -895,7 +895,7 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
@@ -904,7 +904,7 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
@@ -913,7 +913,7 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -922,7 +922,7 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -931,7 +931,7 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
@@ -940,7 +940,7 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
@@ -949,7 +949,7 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -959,7 +959,7 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -968,7 +968,7 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -986,7 +986,7 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -995,7 +995,7 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1004,7 +1004,7 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
@@ -1013,7 +1013,7 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
@@ -1022,7 +1022,7 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1031,7 +1031,7 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
@@ -1049,7 +1049,7 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
@@ -1058,7 +1058,7 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1076,7 +1076,7 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,7 +1085,7 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
@@ -2394,7 +2394,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, uint32_
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vv_i64m1_ta (vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -2403,7 +2403,7 @@ vint64m1_t test_vwsub_vv_i64m1_ta (vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vx_i64m1_ta (vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -2412,7 +2412,7 @@ vint64m1_t test_vwsub_vx_i64m1_ta (vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wv_i64m1_ta (vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -2421,7 +2421,7 @@ vint64m1_t test_vwsub_wv_i64m1_ta (vint64m1_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wx_i64m1_ta (vint64m1_t op1, int32_t op2, size_t vl) {
@@ -2430,7 +2430,7 @@ vint64m1_t test_vwsub_wx_i64m1_ta (vint64m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vv_u64m1_ta (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2439,7 +2439,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_ta (vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vx_u64m1_ta (vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -2448,7 +2448,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_ta (vuint32mf2_t op1, uint32_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wv_u64m1_ta (vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2457,7 +2457,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_ta (vuint64m1_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wx_u64m1_ta (vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -2610,7 +2610,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_tumu (vbool64_t mask, vuint64m1_t merge, vuint6
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vv_i64m1_tama (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -2619,7 +2619,7 @@ vint64m1_t test_vwsub_vv_i64m1_tama (vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vx_i64m1_tama (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -2628,7 +2628,7 @@ vint64m1_t test_vwsub_vx_i64m1_tama (vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wv_i64m1_tama (vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -2637,7 +2637,7 @@ vint64m1_t test_vwsub_wv_i64m1_tama (vbool64_t mask, vint64m1_t op1, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wx_i64m1_tama (vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) {
@@ -2646,7 +2646,7 @@ vint64m1_t test_vwsub_wx_i64m1_tama (vbool64_t mask, vint64m1_t op1, int32_t op2
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vv_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2655,7 +2655,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vx_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -2664,7 +2664,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, uint32_
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wv_u64m1_tama (vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2673,7 +2673,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_tama (vbool64_t mask, vuint64m1_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wx_u64m1_tama (vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) {

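For readers skimming the hunks above: each of these tests calls an RVV intrinsic with no destination operand (tail-agnostic), so Clang must supply a passthru value for the tail elements of the result; this commit only changes that expected passthru in the generated IR from `undef` to `poison`. Below is a minimal sketch of the shape of such a test — the non-overloaded intrinsic spelling and headers are assumptions for illustration, not taken from this diff:

    #include <stddef.h>
    #include <riscv_vector.h>

    // Sketch only: widening unsigned subtract with no passthru argument.
    // The lowered @llvm.riscv.vwsubu call therefore receives a poison
    // (previously undef) first operand for the tail elements.
    vuint64m1_t widen_sub_sketch(vuint32mf2_t op1, vuint32mf2_t op2,
                                 size_t vl) {
      return vwsubu_vv_u64m1(op1, op2, vl);
    }
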
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
index 2d26258214591..4c6da0854e025 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vxor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vxor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint3
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vxor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vxor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
index ebe4920593be2..873637ca49177 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
@@ -15,7 +15,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
@@ -24,7 +24,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
@@ -33,7 +33,7 @@ vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
@@ -42,7 +42,7 @@ vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
@@ -51,7 +51,7 @@ vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
@@ -69,7 +69,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
@@ -78,7 +78,7 @@ vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
@@ -87,7 +87,7 @@ vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
@@ -96,7 +96,7 @@ vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
@@ -105,7 +105,7 @@ vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
@@ -114,7 +114,7 @@ vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
@@ -123,7 +123,7 @@ vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
@@ -132,7 +132,7 @@ vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
@@ -141,7 +141,7 @@ vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
@@ -159,7 +159,7 @@ vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
@@ -168,7 +168,7 @@ vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
@@ -177,7 +177,7 @@ vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
@@ -186,7 +186,7 @@ vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
@@ -195,7 +195,7 @@ vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
@@ -204,7 +204,7 @@ vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
@@ -213,7 +213,7 @@ vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
@@ -222,7 +222,7 @@ vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
@@ -231,7 +231,7 @@ vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
@@ -249,7 +249,7 @@ vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
@@ -547,7 +547,7 @@ vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, size_t
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf2_u64m1_ta(vuint32mf2_t op1, size_t vl) {
@@ -574,7 +574,7 @@ vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf2_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
index 0d5af0132719f..b6dd43be0da13 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1741,7 +1741,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1750,7 +1750,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1759,7 +1759,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1768,7 +1768,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1849,7 +1849,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuin
 
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1858,7 +1858,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vaadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1876,7 +1876,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
index 4f47e0f222259..9024bbe0e21c3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
@@ -16,7 +16,7 @@ vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
@@ -26,7 +26,7 @@ vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
@@ -36,7 +36,7 @@ vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
@@ -46,7 +46,7 @@ vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
@@ -56,7 +56,7 @@ vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin,
@@ -66,7 +66,7 @@ vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
@@ -76,7 +76,7 @@ vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin,
@@ -86,7 +86,7 @@ vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
@@ -96,7 +96,7 @@ vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin,
@@ -106,7 +106,7 @@ vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
@@ -116,7 +116,7 @@ vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin,
@@ -126,7 +126,7 @@ vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
@@ -136,7 +136,7 @@ vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin,
@@ -146,7 +146,7 @@ vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
@@ -156,7 +156,7 @@ vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
@@ -166,7 +166,7 @@ vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
@@ -176,7 +176,7 @@ vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
@@ -186,7 +186,7 @@ vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
@@ -196,7 +196,7 @@ vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin,
@@ -206,7 +206,7 @@ vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
@@ -216,7 +216,7 @@ vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin,
@@ -226,7 +226,7 @@ vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
@@ -236,7 +236,7 @@ vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
@@ -246,7 +246,7 @@ vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
@@ -256,7 +256,7 @@ vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin,
@@ -266,7 +266,7 @@ vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
@@ -276,7 +276,7 @@ vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
@@ -286,7 +286,7 @@ vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
@@ -296,7 +296,7 @@ vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin,
@@ -306,7 +306,7 @@ vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
@@ -316,7 +316,7 @@ vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin,
@@ -326,7 +326,7 @@ vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
@@ -336,7 +336,7 @@ vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin,
@@ -346,7 +346,7 @@ vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
@@ -356,7 +356,7 @@ vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin,
@@ -366,7 +366,7 @@ vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
@@ -376,7 +376,7 @@ vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin,
@@ -386,7 +386,7 @@ vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
@@ -396,7 +396,7 @@ vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin,
@@ -406,7 +406,7 @@ vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
@@ -416,7 +416,7 @@ vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin,
@@ -426,7 +426,7 @@ vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
@@ -436,7 +436,7 @@ vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
@@ -446,7 +446,7 @@ vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -456,7 +456,7 @@ vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
@@ -466,7 +466,7 @@ vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -476,7 +476,7 @@ vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
@@ -486,7 +486,7 @@ vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
@@ -496,7 +496,7 @@ vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
@@ -506,7 +506,7 @@ vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
@@ -516,7 +516,7 @@ vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
@@ -526,7 +526,7 @@ vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
@@ -536,7 +536,7 @@ vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
@@ -546,7 +546,7 @@ vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
@@ -556,7 +556,7 @@ vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
@@ -566,7 +566,7 @@ vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
@@ -576,7 +576,7 @@ vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
@@ -586,7 +586,7 @@ vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -596,7 +596,7 @@ vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
@@ -606,7 +606,7 @@ vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -616,7 +616,7 @@ vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
@@ -626,7 +626,7 @@ vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
@@ -636,7 +636,7 @@ vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
@@ -646,7 +646,7 @@ vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
@@ -656,7 +656,7 @@ vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
@@ -666,7 +666,7 @@ vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
@@ -676,7 +676,7 @@ vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
@@ -686,7 +686,7 @@ vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
@@ -696,7 +696,7 @@ vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
@@ -706,7 +706,7 @@ vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -716,7 +716,7 @@ vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
@@ -726,7 +726,7 @@ vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
@@ -736,7 +736,7 @@ vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
@@ -746,7 +746,7 @@ vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
@@ -756,7 +756,7 @@ vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
@@ -766,7 +766,7 @@ vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
@@ -776,7 +776,7 @@ vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
@@ -786,7 +786,7 @@ vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
@@ -796,7 +796,7 @@ vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
@@ -806,7 +806,7 @@ vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
@@ -816,7 +816,7 @@ vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
@@ -826,7 +826,7 @@ vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
@@ -836,7 +836,7 @@ vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
@@ -846,7 +846,7 @@ vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
@@ -856,7 +856,7 @@ vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
@@ -866,7 +866,7 @@ vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
@@ -876,7 +876,7 @@ vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin,
@@ -922,7 +922,7 @@ vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) {
@@ -931,7 +931,7 @@ vint32mf2_t test_vadc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) {
@@ -940,7 +940,7 @@ vint32mf2_t test_vadc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t carr
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) {
@@ -949,7 +949,7 @@ vuint32mf2_t test_vadc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadc_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) {

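Context for the hunks above: the first operand of these unmasked vadc/vadd
intrinsic calls is the passthrough value, and in the tail-agnostic forms
tested here nothing is merged into the destination, so that operand is a
don't-care. A minimal LLVM IR sketch of why poison is the stronger
don't-care value (this illustrates general LLVM semantics and is not code
from this patch):

  ; undef may resolve to a different value at each use, so the optimizer
  ; must still treat %a as some concrete-but-unknown i8 value.
  %a = add i8 undef, 1

  ; poison propagates through the add: %b is itself poison, which gives
  ; the optimizer strictly more freedom to fold or delete dependent code.
  %b = add i8 poison, 1

Passing poison as the unused passthrough therefore signals that the tail
elements may be freely overwritten, which is what the updated CHECK lines
encode.
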
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
index 772311fc3b5c6..923b7832b1ab8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -43,7 +43,7 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -52,7 +52,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -61,7 +61,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -70,7 +70,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -79,7 +79,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -88,7 +88,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -97,7 +97,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -106,7 +106,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -115,7 +115,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -124,7 +124,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -133,7 +133,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -142,7 +142,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -151,7 +151,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -160,7 +160,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -169,7 +169,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -178,7 +178,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -187,7 +187,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -196,7 +196,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -205,7 +205,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -214,7 +214,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -223,7 +223,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -232,7 +232,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -241,7 +241,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -250,7 +250,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -259,7 +259,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -268,7 +268,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -277,7 +277,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -286,7 +286,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -295,7 +295,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -304,7 +304,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -313,7 +313,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -322,7 +322,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -331,7 +331,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -340,7 +340,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -349,7 +349,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -358,7 +358,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -367,7 +367,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -376,7 +376,7 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -385,7 +385,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -394,7 +394,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -403,7 +403,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -412,7 +412,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -421,7 +421,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -430,7 +430,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -439,7 +439,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -448,7 +448,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -457,7 +457,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -466,7 +466,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -475,7 +475,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -484,7 +484,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -493,7 +493,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -502,7 +502,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -511,7 +511,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -520,7 +520,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -529,7 +529,7 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -538,7 +538,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -556,7 +556,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -574,7 +574,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -583,7 +583,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -592,7 +592,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -601,7 +601,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -610,7 +610,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -619,7 +619,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -628,7 +628,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -637,7 +637,7 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -646,7 +646,7 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -655,7 +655,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -664,7 +664,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -673,7 +673,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -682,7 +682,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -691,7 +691,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -700,7 +700,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -709,7 +709,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -718,7 +718,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -727,7 +727,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -736,7 +736,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -745,7 +745,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -754,7 +754,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -763,7 +763,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -772,7 +772,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -781,7 +781,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -790,7 +790,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1627,7 +1627,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vv_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -1636,7 +1636,7 @@ vint8mf8_t test_vadd_vv_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vx_i8mf8_ta(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -1645,7 +1645,7 @@ vint8mf8_t test_vadd_vx_i8mf8_ta(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vv_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1654,7 +1654,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vx_u8mf8_ta(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1735,7 +1735,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vv_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -1744,7 +1744,7 @@ vint8mf8_t test_vadd_vv_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vadd_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -1753,7 +1753,7 @@ vint8mf8_t test_vadd_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, int8_t op2, s
 
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vv_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1762,7 +1762,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t
 
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vadd_vx_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
index 744f820d5dd22..2d5bf6224e45c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vand_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vand_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vand_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint3
 
 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vand_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vand_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vand_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vand_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

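For context, a minimal sketch (not part of the patch) of the kind of translation unit these CodeGen tests compile. It assumes a toolchain providing <riscv_vector.h> with the V extension enabled, and the era's unprefixed intrinsic naming — `vand_vv_u32m1` is inferred from the test function names above, since the truncated hunks do not show the call bodies verbatim:

#include <riscv_vector.h>

vuint32m1_t and_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  // Unmasked, tail-agnostic form: no merge operand is supplied, so Clang
  // materializes a don't-care passthrough operand for @llvm.riscv.vand --
  // formerly `undef`, now `poison` after this commit.
  return vand_vv_u32m1(op1, op2, vl);
}

Nothing changes for well-defined programs: `poison` is simply a stronger don't-care value than `undef`, giving the optimizer more freedom on the unused passthrough lanes, which is why only the FileCheck lines differ throughout this diff.
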
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
index 92b2e677d951c..c4b04d9ed4bb4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1741,7 +1741,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1750,7 +1750,7 @@ vint32mf2_t test_vasub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1759,7 +1759,7 @@ vint32mf2_t test_vasub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1768,7 +1768,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1849,7 +1849,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuin
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1858,7 +1858,7 @@ vint32mf2_t test_vasub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vasub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vasub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1876,7 +1876,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vasubu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

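Every hunk above makes the same mechanical substitution: at an unmasked or tail-agnostic call site, the dead passthru (merge) operand of the RVV IR intrinsic changes from undef to poison. A minimal sketch of the C-to-IR pattern these CHECK lines verify, assuming the pre-1.0 intrinsic naming these tests use; the wrapper name add_m1 is illustrative, not from the patch:

    #include <riscv_vector.h>

    // Unmasked, tail-agnostic form: the caller supplies no merge value,
    // so the passthru argument of the underlying IR intrinsic carries no
    // useful data.  After this change it lowers roughly as:
    //   %0 = call <vscale x 2 x i32>
    //        @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(
    //            <vscale x 2 x i32> poison,   ; previously undef
    //            <vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)
    vint32m1_t add_m1(vint32m1_t a, vint32m1_t b, size_t vl) {
      return vadd_vv_i32m1(a, b, vl);
    }

Since a tail-agnostic result makes no promise about the tail elements, poison is the more precise choice here and gives the optimizer strictly more freedom than undef.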
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
index c119cdc6942d1..064b38ff3f459 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vdiv_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vdivu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
index acf1d720feffd..46067c06dc49c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -89,7 +89,7 @@ vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -98,7 +98,7 @@ vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -107,7 +107,7 @@ vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -116,7 +116,7 @@ vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -125,7 +125,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -134,7 +134,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -143,7 +143,7 @@ vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -152,7 +152,7 @@ vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -161,7 +161,7 @@ vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -170,7 +170,7 @@ vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -233,7 +233,7 @@ vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -242,7 +242,7 @@ vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -251,7 +251,7 @@ vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -260,7 +260,7 @@ vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -269,7 +269,7 @@ vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -566,7 +566,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -620,7 +620,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
index 6bc3a3501d75f..20b8b7f42cc3a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -17,7 +17,7 @@ vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x i32> poison, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
@@ -26,7 +26,7 @@ vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x i32> poison, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
@@ -35,7 +35,7 @@ vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x i32> poison, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
@@ -44,7 +44,7 @@ vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x i32> poison, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
@@ -53,7 +53,7 @@ vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x i64> poison, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x i64> poison, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x i64> poison, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x i64> poison, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1f16.i64(<vscale x 1 x i16> poison, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2f16.i64(<vscale x 2 x i16> poison, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4f16.i64(<vscale x 4 x i16> poison, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8f16.i64(<vscale x 8 x i16> poison, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16f16.i64(<vscale x 16 x i16> poison, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t op1, size_t vl) {
@@ -224,7 +224,7 @@ vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32f16.i64(<vscale x 32 x i16> poison, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t op1, size_t vl) {
@@ -296,7 +296,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t merge, vfloat32mf2_t op1, siz
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2_ta(vfloat32mf2_t op1, size_t vl) {
@@ -323,7 +323,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vflo
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
index 5486106c0f76b..058ec80337fe3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
@@ -17,7 +17,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
@@ -26,7 +26,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
@@ -35,7 +35,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
@@ -44,7 +44,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
@@ -53,7 +53,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
@@ -62,7 +62,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
@@ -71,7 +71,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
@@ -80,7 +80,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
@@ -89,7 +89,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
@@ -98,7 +98,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
@@ -107,7 +107,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
@@ -116,7 +116,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
@@ -125,7 +125,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
@@ -134,7 +134,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
@@ -143,7 +143,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
@@ -152,7 +152,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
@@ -161,7 +161,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
@@ -170,7 +170,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
@@ -179,7 +179,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
@@ -188,7 +188,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
@@ -197,7 +197,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
@@ -206,7 +206,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
@@ -215,7 +215,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
@@ -224,7 +224,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
@@ -233,7 +233,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
@@ -242,7 +242,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
@@ -251,7 +251,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
@@ -260,7 +260,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
@@ -269,7 +269,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
@@ -278,7 +278,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
@@ -287,7 +287,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
@@ -296,7 +296,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
@@ -305,7 +305,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
@@ -314,7 +314,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
@@ -323,7 +323,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
@@ -332,7 +332,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
@@ -341,7 +341,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
@@ -350,7 +350,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
@@ -359,7 +359,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
@@ -368,7 +368,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
@@ -377,7 +377,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
@@ -386,7 +386,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
@@ -395,7 +395,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
@@ -404,7 +404,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
@@ -413,7 +413,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
@@ -422,7 +422,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
@@ -431,7 +431,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
@@ -440,7 +440,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
@@ -449,7 +449,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
@@ -458,7 +458,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
@@ -467,7 +467,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
@@ -476,7 +476,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
@@ -485,7 +485,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
@@ -1036,7 +1036,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
@@ -1045,7 +1045,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
@@ -1054,7 +1054,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
@@ -1063,7 +1063,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
@@ -1072,7 +1072,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
@@ -1081,7 +1081,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
@@ -1090,7 +1090,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
@@ -1099,7 +1099,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
@@ -1108,7 +1108,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
@@ -1117,7 +1117,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
@@ -1126,7 +1126,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
@@ -1135,7 +1135,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
@@ -1144,7 +1144,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
@@ -1153,7 +1153,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
@@ -1162,7 +1162,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
@@ -1171,7 +1171,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
@@ -1180,7 +1180,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
@@ -1189,7 +1189,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
@@ -1198,7 +1198,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
@@ -1207,7 +1207,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
@@ -1216,7 +1216,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
@@ -1225,7 +1225,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
@@ -1234,7 +1234,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
@@ -1243,7 +1243,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
@@ -1252,7 +1252,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t src, size_t vl) {
@@ -1261,7 +1261,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t src, size_t vl) {
@@ -1270,7 +1270,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t src, size_t vl) {
@@ -1279,7 +1279,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64(<vscale x 8 x half> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t src, size_t vl) {
@@ -1288,7 +1288,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64(<vscale x 16 x half> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t src, size_t vl) {
@@ -1297,7 +1297,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64(<vscale x 32 x half> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64(<vscale x 32 x half> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t src, size_t vl) {
@@ -1306,7 +1306,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t src, size_t vl) {
@@ -1315,7 +1315,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t src, size_t vl) {
@@ -1324,7 +1324,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t src, size_t vl) {
@@ -1333,7 +1333,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64(<vscale x 8 x half> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t src, size_t vl) {
@@ -1342,7 +1342,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64(<vscale x 16 x half> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t src, size_t vl) {
@@ -1351,7 +1351,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64(<vscale x 32 x half> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64(<vscale x 32 x half> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t src, size_t vl) {
@@ -1738,7 +1738,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t merge, vuint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) {
@@ -1747,7 +1747,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) {
@@ -1756,7 +1756,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) {
@@ -1765,7 +1765,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) {
@@ -1774,7 +1774,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_ta(vint32mf2_t src, size_t vl) {
@@ -1783,7 +1783,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_ta(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_ta(vuint32mf2_t src, size_t vl) {
@@ -1900,7 +1900,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge,
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1909,7 +1909,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1918,7 +1918,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1927,7 +1927,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src, si
 
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1936,7 +1936,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
@@ -1945,7 +1945,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tama(vbool64_t mask, vint32mf2_t src, size
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {

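All of the hunks above follow one pattern: for the unmasked and tail-agnostic forms, the passthru (merge) operand of the lowered llvm.riscv.* call changes from undef to poison. Poison is LLVM's preferred placeholder for a "don't care" value because, unlike undef, it propagates through its uses and therefore constrains the optimizer less. A minimal sketch of the source shape these tests exercise, assuming the non-prefixed intrinsic naming used in this test suite (vfdiv_vv_f32mf2 is taken from the tests below; the wrapper name quotient is hypothetical):

#include <stddef.h>
#include <riscv_vector.h>

// Sketch: an unmasked, tail-agnostic RVV intrinsic call. After this patch,
// the passthru operand of the resulting @llvm.riscv.vfdiv call is poison
// instead of undef; the computed lanes are unaffected.
vfloat32mf2_t quotient(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfdiv_vv_f32mf2(op1, op2, vl);
}
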
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
index 3c37e46013858..f260dca84e009 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -602,7 +602,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -656,7 +656,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -665,7 +665,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
index 955fb16050626..d680bfc844b59 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -602,7 +602,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -656,7 +656,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -665,7 +665,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
index 53a4eefd411ab..dde4e6be1fd59 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -602,7 +602,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -656,7 +656,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -665,7 +665,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

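In every hunk above, the value being rewritten is the first operand of the @llvm.riscv.* call, i.e. the merge/passthru vector. The unmasked and tail-agnostic (_ta/_tama) forms never read those lanes (the _tu variants, which do, keep their [[MERGE]] operand), and poison is the preferred placeholder for an unused operand because, unlike undef, it propagates freely and places no constraints on the optimizer. A minimal hand-reduced sketch in LLVM IR (the @demo wrapper and its value names are illustrative, not part of this patch; the intrinsic signature is the one visible in the vfmin hunks above):

; The updated CHECK lines expect poison as the unused passthru
; operand; substituting undef gives the old form.
declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(
    <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64)

define <vscale x 1 x float> @demo(<vscale x 1 x float> %op1, <vscale x 1 x float> %op2, i64 %vl) {
entry:
  %0 = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> %op1, <vscale x 1 x float> %op2, i64 %vl)
  ret <vscale x 1 x float> %0
}
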
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
index d87d6f9f00661..fb505086e0bc3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -602,7 +602,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -656,7 +656,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -665,7 +665,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

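For readers skimming the hunks above: a minimal sketch, assuming the non-overloaded intrinsic names exercised in this vfmul test file, of why only the tail-agnostic forms pick up the poison passthru while the tail-undisturbed forms are untouched by this commit:

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical illustration, not part of this diff. The _tu form carries a
// real merge vector as its passthru operand (tail elements keep merge's
// values), so its IR call is unchanged; the _ta form has no merge operand,
// so its passthru is now modeled as poison rather than undef.
vfloat32mf2_t demo_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2,
                      size_t vl) {
  return vfmul_vf_f32mf2_tu(merge, op1, op2, vl); // passthru = [[MERGE]]
}

vfloat32mf2_t demo_ta(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmul_vf_f32mf2_ta(op1, op2, vl);        // passthru = poison
}
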
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
index c3f83526638c3..e722a10f1d615 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(<vscale x 1 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(<vscale x 1 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmv_v_f_f32mf2(float src, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmv_v_f_f32mf2(float src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.i64(<vscale x 2 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.i64(<vscale x 2 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmv_v_f_f32m1(float src, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfmv_v_f_f32m1(float src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.i64(<vscale x 4 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.i64(<vscale x 4 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmv_v_f_f32m2(float src, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfmv_v_f_f32m2(float src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.i64(<vscale x 8 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.i64(<vscale x 8 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmv_v_f_f32m4(float src, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfmv_v_f_f32m4(float src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.i64(<vscale x 16 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.i64(<vscale x 16 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmv_v_f_f32m8(float src, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfmv_v_f_f32m8(float src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.i64(<vscale x 1 x double> undef, double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.i64(<vscale x 1 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmv_v_f_f64m1(double src, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfmv_v_f_f64m1(double src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.i64(<vscale x 2 x double> undef, double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.i64(<vscale x 2 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmv_v_f_f64m2(double src, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfmv_v_f_f64m2(double src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.i64(<vscale x 4 x double> undef, double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.i64(<vscale x 4 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmv_v_f_f64m4(double src, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfmv_v_f_f64m4(double src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.i64(<vscale x 8 x double> undef, double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.i64(<vscale x 8 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmv_v_f_f64m8(double src, size_t vl) {
@@ -251,7 +251,7 @@ vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dst, double src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.i64(<vscale x 1 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.i64(<vscale x 1 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmv_v_f_f16mf4 (_Float16 src, size_t vl) {
@@ -260,7 +260,7 @@ vfloat16mf4_t test_vfmv_v_f_f16mf4 (_Float16 src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.i64(<vscale x 2 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.i64(<vscale x 2 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmv_v_f_f16mf2 (_Float16 src, size_t vl) {
@@ -269,7 +269,7 @@ vfloat16mf2_t test_vfmv_v_f_f16mf2 (_Float16 src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.i64(<vscale x 4 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.i64(<vscale x 4 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmv_v_f_f16m1 (_Float16 src, size_t vl) {
@@ -278,7 +278,7 @@ vfloat16m1_t test_vfmv_v_f_f16m1 (_Float16 src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.i64(<vscale x 8 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.i64(<vscale x 8 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmv_v_f_f16m2 (_Float16 src, size_t vl) {
@@ -287,7 +287,7 @@ vfloat16m2_t test_vfmv_v_f_f16m2 (_Float16 src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.i64(<vscale x 16 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.i64(<vscale x 16 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmv_v_f_f16m4 (_Float16 src, size_t vl) {
@@ -296,7 +296,7 @@ vfloat16m4_t test_vfmv_v_f_f16m4 (_Float16 src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.i64(<vscale x 32 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.i64(<vscale x 32 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmv_v_f_f16m8 (_Float16 src, size_t vl) {
@@ -422,7 +422,7 @@ vfloat32mf2_t test_vfmv_v_f_f32mf2_tu (vfloat32mf2_t merge, float src, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(<vscale x 1 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(<vscale x 1 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmv_v_f_f32mf2_ta (float src, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32mf2_t test_vfmv_s_f_f32mf2_tu (vfloat32mf2_t merge, float src, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmv_s_f_f32mf2_ta (float src, size_t vl) {

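Similarly for the vfmv hunks above, a minimal sketch (hypothetical, using the vfmv_v_f intrinsic this test file calls): vfmv.v.f splats a scalar into the body elements and reads no vector source, so the passthru operand of the tail-agnostic call only models tail state, now represented as poison in the CHECK lines above:

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical example, not part of this diff: every body element comes
// from src; the tail elements have no defined source, hence the poison
// passthru operand in the lowered llvm.riscv.vfmv.v.f call.
vfloat32m1_t splat_f32m1(float src, size_t vl) {
  return vfmv_v_f_f32m1(src, vl);
}
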
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
index 5f3bdda9c3a5d..7ff29c4e4ecb0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
@@ -17,7 +17,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
@@ -26,7 +26,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
@@ -35,7 +35,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
@@ -44,7 +44,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
@@ -53,7 +53,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
@@ -62,7 +62,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
@@ -71,7 +71,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
@@ -80,7 +80,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
@@ -89,7 +89,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
@@ -98,7 +98,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
@@ -107,7 +107,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
@@ -116,7 +116,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
@@ -125,7 +125,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
@@ -134,7 +134,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
@@ -143,7 +143,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
@@ -152,7 +152,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
@@ -161,7 +161,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
@@ -170,7 +170,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
@@ -179,7 +179,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
@@ -188,7 +188,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
@@ -197,7 +197,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
@@ -206,7 +206,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
@@ -215,7 +215,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
@@ -224,7 +224,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
@@ -233,7 +233,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
@@ -242,7 +242,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
@@ -251,7 +251,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
@@ -260,7 +260,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
@@ -269,7 +269,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
@@ -278,7 +278,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
@@ -287,7 +287,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
@@ -296,7 +296,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
@@ -305,7 +305,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
@@ -314,7 +314,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
@@ -323,7 +323,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
@@ -332,7 +332,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
@@ -341,7 +341,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
@@ -350,7 +350,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
@@ -359,7 +359,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
@@ -368,7 +368,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
@@ -377,7 +377,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
@@ -386,7 +386,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
@@ -395,7 +395,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
@@ -404,7 +404,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
@@ -413,7 +413,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
@@ -422,7 +422,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
@@ -431,7 +431,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
@@ -449,7 +449,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
@@ -458,7 +458,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
@@ -467,7 +467,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
@@ -1012,7 +1012,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask,
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
@@ -1021,7 +1021,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
@@ -1030,7 +1030,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
@@ -1039,7 +1039,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
@@ -1048,7 +1048,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
@@ -1057,7 +1057,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
@@ -1066,7 +1066,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
@@ -1075,7 +1075,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
@@ -1084,7 +1084,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
@@ -1093,7 +1093,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
@@ -1102,7 +1102,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
@@ -1111,7 +1111,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
@@ -1120,7 +1120,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
@@ -1129,7 +1129,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
@@ -1138,7 +1138,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
@@ -1147,7 +1147,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
@@ -1156,7 +1156,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
@@ -1165,7 +1165,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
@@ -1174,7 +1174,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
@@ -1183,7 +1183,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
@@ -1192,7 +1192,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
@@ -1201,7 +1201,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
@@ -1210,7 +1210,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
@@ -1219,7 +1219,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
@@ -1228,7 +1228,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) {
@@ -1237,7 +1237,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) {
@@ -1246,7 +1246,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) {
@@ -1255,7 +1255,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64(<vscale x 8 x half> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) {
@@ -1264,7 +1264,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64(<vscale x 16 x half> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) {
@@ -1273,7 +1273,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) {
@@ -1282,7 +1282,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) {
@@ -1291,7 +1291,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) {
@@ -1300,7 +1300,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64(<vscale x 8 x half> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) {
@@ -1309,7 +1309,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64(<vscale x 16 x half> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) {
@@ -1318,7 +1318,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
@@ -1327,7 +1327,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
@@ -1336,7 +1336,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x half> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x half> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
@@ -1345,7 +1345,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x half> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x half> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
@@ -1354,7 +1354,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x half> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x half> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
@@ -1363,7 +1363,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x half> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x half> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
@@ -1372,7 +1372,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x half> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x half> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
@@ -1381,7 +1381,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x half> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x half> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
@@ -1390,7 +1390,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x half> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x half> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
@@ -1399,7 +1399,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x half> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x half> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
@@ -1876,7 +1876,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1885,7 +1885,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1894,7 +1894,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1903,7 +1903,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1912,7 +1912,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_ta(vint32mf2_t src, size_t vl) {
@@ -1921,7 +1921,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_ta(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_ta(vuint32mf2_t src, size_t vl) {
@@ -1930,7 +1930,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_ta(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -1939,7 +1939,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_ta(vfloat32mf2_t src, size_t vl) {
@@ -2092,7 +2092,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t me
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -2101,7 +2101,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -2110,7 +2110,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -2119,7 +2119,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t src, s
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -2128,7 +2128,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t sr
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
@@ -2137,7 +2137,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tama(vbool64_t mask, vint32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
@@ -2146,7 +2146,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tama(vbool64_t mask, vuint32mf2_t src, s
 
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -2155,7 +2155,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tama(vbool64_t mask, vfloat32mf2_t src, s
 
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
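
Every hunk above follows the same mechanical pattern, so one worked example may help when skimming the diff. For intrinsic calls without a tail-undisturbed policy, the C builtin takes no merge/maskedoff operand, so the passthru argument of the underlying LLVM intrinsic carries no defined value, and the lowering now materializes it as poison rather than undef. Below is a minimal sketch of the C call and the IR the updated CHECK lines expect; the vfncvt_f_f_w_f32m1 spelling matches the test names above, but the function body itself is an assumption, since the diff context elides it:

    #include <riscv_vector.h>

    // Unmasked, tail-agnostic narrowing convert, f64m2 -> f32m1.
    // No merge operand exists in C, so the IR passthru lane values
    // are unspecified: previously 'undef', now lowered as 'poison'.
    vfloat32m1_t narrow(vfloat64m2_t src, size_t vl) {
      return vfncvt_f_f_w_f32m1(src, vl);
      // CHECK-RV64: call <vscale x 2 x float>
      //     @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(
      //         <vscale x 2 x float> poison,
      //         <vscale x 2 x double> [[SRC]], i64 [[VL]])
    }

The _tu variants (for example test_vfncvt_rod_f_f_w_f16mf4_tu in the skipped context above, which takes an explicit merge argument) keep passing a real vector as the passthru operand, which is why those tests are untouched by this patch: only operands that were already unspecified become poison.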

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
index c7531781d2981..f0bf7d702af6d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -296,7 +296,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, f
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrdiv_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -323,7 +323,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vf
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrdiv_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
index 9ff88419c9b17..18c2940c358f3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) {
@@ -296,7 +296,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, si
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrec7_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
@@ -323,7 +323,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrec7_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
index e47fed03c729e..d3d1fa51cef9b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
@@ -315,7 +315,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
@@ -333,7 +333,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merg
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
index 557605edf39f9..a887537f1c146 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
@@ -315,7 +315,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
@@ -333,7 +333,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merg
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
index f1ee4e2ffdbff..7be3840dc78d4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
@@ -621,7 +621,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
@@ -639,7 +639,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mer
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
@@ -657,7 +657,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
@@ -675,7 +675,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mer
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
index 607c9b0380842..0b20750e8d250 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t op1, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t op1, size_t vl) {
@@ -296,7 +296,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1,
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsqrt7_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
@@ -323,7 +323,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, v
 
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
index 3ec3c37be74b7..27b736c23a2b9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -296,7 +296,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, f
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsub_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -323,7 +323,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vf
 
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfrsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
index 0118486c110a0..b33b4ec26a2e2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -189,7 +189,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -208,7 +208,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -217,7 +217,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -227,7 +227,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -236,7 +236,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -246,7 +246,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -255,7 +255,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -274,7 +274,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -284,7 +284,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -293,7 +293,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -303,7 +303,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -312,7 +312,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -322,7 +322,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -331,7 +331,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -341,7 +341,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -350,7 +350,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -360,7 +360,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -369,7 +369,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -379,7 +379,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -388,7 +388,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -398,7 +398,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -407,7 +407,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -417,7 +417,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -426,7 +426,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -436,7 +436,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -445,7 +445,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -455,7 +455,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -464,7 +464,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -474,7 +474,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -483,7 +483,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -493,7 +493,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -502,7 +502,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -512,7 +512,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -1090,7 +1090,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -1099,7 +1099,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -1108,7 +1108,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -1117,7 +1117,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -1126,7 +1126,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -1135,7 +1135,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -1144,7 +1144,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -1153,7 +1153,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -1162,7 +1162,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1171,7 +1171,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1180,7 +1180,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1189,7 +1189,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -1198,7 +1198,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -1207,7 +1207,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -1216,7 +1216,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -1225,7 +1225,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -1234,7 +1234,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -1243,7 +1243,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -1252,7 +1252,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -1261,7 +1261,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -1270,7 +1270,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1279,7 +1279,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1288,7 +1288,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1297,7 +1297,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -1306,7 +1306,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -1315,7 +1315,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -1324,7 +1324,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -1333,7 +1333,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -1342,7 +1342,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -1351,7 +1351,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -1360,7 +1360,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -1369,7 +1369,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -1378,7 +1378,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1396,7 +1396,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1405,7 +1405,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -1792,7 +1792,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1,
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1801,7 +1801,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, siz
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1810,7 +1810,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1819,7 +1819,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, si
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1828,7 +1828,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1837,7 +1837,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, si
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1954,7 +1954,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, v
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1963,7 +1963,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vflo
 
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1972,7 +1972,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, floa
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1981,7 +1981,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfl
 
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1990,7 +1990,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, flo
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1999,7 +1999,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfl
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
index 133748e3e280c..6909abab9649d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
@@ -28,7 +28,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
@@ -38,7 +38,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
@@ -48,7 +48,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
@@ -58,7 +58,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
@@ -68,7 +68,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
@@ -78,7 +78,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
@@ -88,7 +88,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
@@ -203,7 +203,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) {
@@ -212,7 +212,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, si
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) {
@@ -221,7 +221,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, si
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) {
@@ -230,7 +230,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) {
@@ -239,7 +239,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) {
@@ -248,7 +248,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfslide1down_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) {
@@ -320,7 +320,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1down_vf_f32mf2_ta(vfloat32mf2_t src, float value, size_t vl) {
@@ -347,7 +347,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t mer
 
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1down_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
index 3f76d42af0a4a..037455510d4a9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
@@ -28,7 +28,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
@@ -38,7 +38,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
@@ -48,7 +48,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
@@ -58,7 +58,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
@@ -68,7 +68,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
@@ -78,7 +78,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
@@ -88,7 +88,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
@@ -198,7 +198,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) {
@@ -207,7 +207,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) {
@@ -216,7 +216,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) {
@@ -225,7 +225,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) {
@@ -234,7 +234,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) {
@@ -243,7 +243,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfslide1up_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) {
@@ -315,7 +315,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t sr
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1up_vf_f32mf2_ta(vfloat32mf2_t src, float value, size_t vl) {
@@ -342,7 +342,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge
 
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfslide1up_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
index cac45493f925a..7c7c30c11282e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) {
@@ -296,7 +296,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, si
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsqrt_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
@@ -323,7 +323,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsqrt_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
index 6d0a05af50151..5418fda526794 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -113,7 +113,7 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -132,7 +132,7 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -377,7 +377,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -386,7 +386,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -395,7 +395,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -404,7 +404,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -413,7 +413,7 @@ vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -422,7 +422,7 @@ vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -431,7 +431,7 @@ vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -449,7 +449,7 @@ vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -458,7 +458,7 @@ vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -467,7 +467,7 @@ vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -602,7 +602,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, fl
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -656,7 +656,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfl
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -665,7 +665,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloa
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
index f98a4eda0c643..37d2d29950d3a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -37,7 +37,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -56,7 +56,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -75,7 +75,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -94,7 +94,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -113,7 +113,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -132,7 +132,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -337,7 +337,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) {
@@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -373,7 +373,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -382,7 +382,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -391,7 +391,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) {
@@ -400,7 +400,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -409,7 +409,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -418,7 +418,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
@@ -427,7 +427,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) {
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -445,7 +445,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) {
@@ -472,7 +472,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -490,7 +490,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
@@ -499,7 +499,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) {
@@ -724,7 +724,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, float
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -733,7 +733,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -742,7 +742,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -751,7 +751,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wf_f64m1_ta(vfloat64m1_t op1, float op2, size_t vl) {
@@ -832,7 +832,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloa
 
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -841,7 +841,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -850,7 +850,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float
 
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -859,7 +859,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat3
 
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wf_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
index 01e12bd3bd026..f2e815c126f06 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
@@ -62,7 +62,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
@@ -71,7 +71,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
@@ -89,7 +89,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
@@ -98,7 +98,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
@@ -107,7 +107,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
@@ -116,7 +116,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
@@ -125,7 +125,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
@@ -134,7 +134,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
@@ -143,7 +143,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
@@ -152,7 +152,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
@@ -161,7 +161,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
@@ -170,7 +170,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
@@ -179,7 +179,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
@@ -188,7 +188,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
@@ -197,7 +197,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
@@ -206,7 +206,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
@@ -215,7 +215,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
@@ -224,7 +224,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
@@ -233,7 +233,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
@@ -242,7 +242,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
@@ -251,7 +251,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
@@ -260,7 +260,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
@@ -269,7 +269,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
@@ -278,7 +278,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
@@ -287,7 +287,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
@@ -296,7 +296,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
@@ -305,7 +305,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
@@ -314,7 +314,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
@@ -323,7 +323,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
@@ -332,7 +332,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
@@ -341,7 +341,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
@@ -735,7 +735,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t src, size_t vl) {
@@ -744,7 +744,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t src, size_t vl) {
@@ -753,7 +753,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t src, size_t vl) {
@@ -762,7 +762,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64(<vscale x 8 x half> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t src, size_t vl) {
@@ -771,7 +771,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64(<vscale x 16 x half> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t src, size_t vl) {
@@ -780,7 +780,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64(<vscale x 32 x half> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64(<vscale x 32 x half> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t src, size_t vl) {
@@ -789,7 +789,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t src, size_t vl) {
@@ -798,7 +798,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t src, size_t vl) {
@@ -807,7 +807,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t src, size_t vl) {
@@ -816,7 +816,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64(<vscale x 8 x half> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t src, size_t vl) {
@@ -825,7 +825,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64(<vscale x 16 x half> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t src, size_t vl) {
@@ -834,7 +834,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64(<vscale x 32 x half> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64(<vscale x 32 x half> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t src, size_t vl) {
@@ -843,7 +843,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) {
@@ -852,7 +852,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) {
@@ -861,7 +861,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) {
@@ -870,7 +870,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) {
@@ -879,7 +879,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t src, size_t vl) {
@@ -888,7 +888,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) {
@@ -897,7 +897,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t src, size_t vl) {
@@ -906,7 +906,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) {
@@ -915,7 +915,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t src, size_t vl) {
@@ -924,7 +924,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) {
@@ -933,7 +933,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) {
@@ -942,7 +942,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) {
@@ -951,7 +951,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) {
@@ -960,7 +960,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) {
@@ -969,7 +969,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) {
@@ -978,7 +978,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) {
@@ -987,7 +987,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) {
@@ -996,7 +996,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) {
@@ -1005,7 +1005,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) {
@@ -1014,7 +1014,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) {
@@ -1023,7 +1023,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64(<vscale x 1 x float> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t src, size_t vl) {
@@ -1032,7 +1032,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t src, size_t vl) {
@@ -1041,7 +1041,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64(<vscale x 4 x float> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t src, size_t vl) {
@@ -1050,7 +1050,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64(<vscale x 8 x float> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t src, size_t vl) {
@@ -1059,7 +1059,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64(<vscale x 16 x float> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t src, size_t vl) {
@@ -1464,7 +1464,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t src, s
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) {
@@ -1473,7 +1473,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) {
@@ -1482,7 +1482,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) {
@@ -1491,7 +1491,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) {
@@ -1500,7 +1500,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_x_v_f64m1_ta(vint32mf2_t src, size_t vl) {
@@ -1509,7 +1509,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_ta(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_ta(vuint32mf2_t src, size_t vl) {
@@ -1518,7 +1518,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_ta(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_f_v_f64m1_ta(vfloat32mf2_t src, size_t vl) {
@@ -1653,7 +1653,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vf
 
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1662,7 +1662,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1671,7 +1671,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, s
 
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1680,7 +1680,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
@@ -1689,7 +1689,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
@@ -1698,7 +1698,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tama(vbool64_t mask, vint32mf2_t src, size_
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
@@ -1707,7 +1707,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tama(vbool64_t mask, vuint32mf2_t src, siz
 
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) {

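The change is mechanical across these tests: for the unmasked and tail-agnostic ("_ta"/"_tama") calls, the passthru (first) operand of the RVV intrinsic is never observed, so the emitted IR now passes poison rather than undef. As a minimal hand-written sketch (not part of the patch), reusing one intrinsic signature from the tests above:

declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64>, <vscale x 1 x float>, i64)

define <vscale x 1 x i64> @sketch(<vscale x 1 x float> %src, i64 %vl) {
entry:
  ; Tail-agnostic: elements past vl are "don't care", so the passthru is poison.
  %v = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> poison, <vscale x 1 x float> %src, i64 %vl)
  ret <vscale x 1 x i64> %v
}

poison is preferred here because undef must still resolve to some value of its type at each use, while poison carries no such constraint, giving later optimizations strictly more freedom.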
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
index 4a37033312cc2..227617aec7bda 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -37,7 +37,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -56,7 +56,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -75,7 +75,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -168,7 +168,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -186,7 +186,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -204,7 +204,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -222,7 +222,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -240,7 +240,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -366,7 +366,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, floa
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -420,7 +420,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloa
 
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
index c0cd8514f9853..a30a26fe78a0a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
@@ -453,7 +453,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
@@ -471,7 +471,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t me
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
@@ -489,7 +489,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
@@ -507,7 +507,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t me
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
index 794abe0eeae36..39b83a8f2033e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -18,7 +18,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -27,7 +27,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -37,7 +37,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -46,7 +46,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -56,7 +56,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -65,7 +65,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -75,7 +75,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -84,7 +84,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -94,7 +94,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -103,7 +103,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -113,7 +113,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -122,7 +122,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -132,7 +132,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -141,7 +141,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -151,7 +151,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -337,7 +337,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) {
@@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -373,7 +373,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -382,7 +382,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -391,7 +391,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) {
@@ -400,7 +400,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -409,7 +409,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -418,7 +418,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
@@ -427,7 +427,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) {
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -445,7 +445,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) {
@@ -472,7 +472,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -490,7 +490,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
@@ -499,7 +499,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) {
@@ -724,7 +724,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, float
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -733,7 +733,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -742,7 +742,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -751,7 +751,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wf_f64m1_ta(vfloat64m1_t op1, float op2, size_t vl) {
@@ -832,7 +832,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloa
 
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -841,7 +841,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -850,7 +850,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float
 
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -859,7 +859,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat3
 
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wf_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
index 8f30698024cb0..e33d7b6d5814d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
@@ -6,154 +6,154 @@
 
 // CHECK-RV64-LABEL: @test_vid_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i64(<vscale x 1 x i8> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i64(<vscale x 1 x i8> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vid_v_u8mf8(size_t vl) { return vid_v_u8mf8(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i64(<vscale x 2 x i8> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i64(<vscale x 2 x i8> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vid_v_u8mf4(size_t vl) { return vid_v_u8mf4(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i64(<vscale x 4 x i8> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i64(<vscale x 4 x i8> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vid_v_u8mf2(size_t vl) { return vid_v_u8mf2(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i64(<vscale x 8 x i8> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i64(<vscale x 8 x i8> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vid_v_u8m1(size_t vl) { return vid_v_u8m1(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i64(<vscale x 16 x i8> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i64(<vscale x 16 x i8> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vid_v_u8m2(size_t vl) { return vid_v_u8m2(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i64(<vscale x 32 x i8> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i64(<vscale x 32 x i8> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vid_v_u8m4(size_t vl) { return vid_v_u8m4(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i64(<vscale x 64 x i8> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i64(<vscale x 64 x i8> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vid_v_u8m8(size_t vl) { return vid_v_u8m8(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i64(<vscale x 1 x i16> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i64(<vscale x 1 x i16> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vid_v_u16mf4(size_t vl) { return vid_v_u16mf4(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i64(<vscale x 2 x i16> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i64(<vscale x 2 x i16> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vid_v_u16mf2(size_t vl) { return vid_v_u16mf2(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i64(<vscale x 4 x i16> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i64(<vscale x 4 x i16> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vid_v_u16m1(size_t vl) { return vid_v_u16m1(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i64(<vscale x 8 x i16> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i64(<vscale x 8 x i16> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vid_v_u16m2(size_t vl) { return vid_v_u16m2(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i64(<vscale x 16 x i16> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i64(<vscale x 16 x i16> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vid_v_u16m4(size_t vl) { return vid_v_u16m4(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i64(<vscale x 32 x i16> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i64(<vscale x 32 x i16> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vid_v_u16m8(size_t vl) { return vid_v_u16m8(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vid_v_u32mf2(size_t vl) { return vid_v_u32mf2(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(<vscale x 2 x i32> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(<vscale x 2 x i32> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vid_v_u32m1(size_t vl) { return vid_v_u32m1(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(<vscale x 4 x i32> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(<vscale x 4 x i32> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vid_v_u32m2(size_t vl) { return vid_v_u32m2(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i64(<vscale x 8 x i32> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i64(<vscale x 8 x i32> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vid_v_u32m4(size_t vl) { return vid_v_u32m4(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i64(<vscale x 16 x i32> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i64(<vscale x 16 x i32> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vid_v_u32m8(size_t vl) { return vid_v_u32m8(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i64(<vscale x 1 x i64> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i64(<vscale x 1 x i64> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vid_v_u64m1(size_t vl) { return vid_v_u64m1(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i64(<vscale x 2 x i64> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i64(<vscale x 2 x i64> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vid_v_u64m2(size_t vl) { return vid_v_u64m2(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i64(<vscale x 4 x i64> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i64(<vscale x 4 x i64> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vid_v_u64m4(size_t vl) { return vid_v_u64m4(vl); }
 
 // CHECK-RV64-LABEL: @test_vid_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i64(<vscale x 8 x i64> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i64(<vscale x 8 x i64> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vid_v_u64m8(size_t vl) { return vid_v_u64m8(vl); }
@@ -385,7 +385,7 @@ vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t merge, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vid_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> poison, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vid_v_u32mf2_ta(size_t vl) {
@@ -412,7 +412,7 @@ vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, size_t v
 
 // CHECK-RV64-LABEL: @test_vid_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vid_v_u32mf2_tama(vbool64_t mask, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
index 5e25700559c9a..bcfd671c4d7de 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) {
@@ -42,7 +42,7 @@ vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) {
@@ -51,7 +51,7 @@ vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) {
@@ -78,7 +78,7 @@ vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) {
@@ -87,7 +87,7 @@ vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) {
@@ -96,7 +96,7 @@ vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) {
@@ -105,7 +105,7 @@ vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) {
@@ -114,7 +114,7 @@ vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) {
@@ -123,7 +123,7 @@ vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) {
@@ -132,7 +132,7 @@ vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) {
@@ -141,7 +141,7 @@ vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) {
@@ -159,7 +159,7 @@ vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) {
@@ -168,7 +168,7 @@ vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) {
@@ -177,7 +177,7 @@ vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) {
@@ -186,7 +186,7 @@ vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) {
@@ -195,7 +195,7 @@ vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) {
@@ -433,7 +433,7 @@ vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t merge, vbool64_t op1, size_t vl
 
 // CHECK-RV64-LABEL: @test_viota_m_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_viota_m_u32mf2_ta(vbool64_t op1, size_t vl) {
@@ -460,7 +460,7 @@ vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vbool6
 
 // CHECK-RV64-LABEL: @test_viota_m_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_viota_m_u32mf2_tama(vbool64_t mask, vbool64_t op1, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
index bd58397ad9105..1b68d1227eb71 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vle8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vle8_v_i8mf8(const int8_t *base, size_t vl) {
@@ -17,7 +17,7 @@ vint8mf8_t test_vle8_v_i8mf8(const int8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vle8_v_i8mf4(const int8_t *base, size_t vl) {
@@ -26,7 +26,7 @@ vint8mf4_t test_vle8_v_i8mf4(const int8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vle8_v_i8mf2(const int8_t *base, size_t vl) {
@@ -35,7 +35,7 @@ vint8mf2_t test_vle8_v_i8mf2(const int8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vle8_v_i8m1(const int8_t *base, size_t vl) {
@@ -44,7 +44,7 @@ vint8m1_t test_vle8_v_i8m1(const int8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vle8_v_i8m2(const int8_t *base, size_t vl) {
@@ -53,7 +53,7 @@ vint8m2_t test_vle8_v_i8m2(const int8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vle8_v_i8m4(const int8_t *base, size_t vl) {
@@ -62,7 +62,7 @@ vint8m4_t test_vle8_v_i8m4(const int8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vle8_v_i8m8(const int8_t *base, size_t vl) {
@@ -71,7 +71,7 @@ vint8m8_t test_vle8_v_i8m8(const int8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vle16_v_i16mf4(const int16_t *base, size_t vl) {
@@ -80,7 +80,7 @@ vint16mf4_t test_vle16_v_i16mf4(const int16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vle16_v_i16mf2(const int16_t *base, size_t vl) {
@@ -89,7 +89,7 @@ vint16mf2_t test_vle16_v_i16mf2(const int16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vle16_v_i16m1(const int16_t *base, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vle16_v_i16m1(const int16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vle16_v_i16m2(const int16_t *base, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vle16_v_i16m2(const int16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vle16_v_i16m4(const int16_t *base, size_t vl) {
@@ -116,7 +116,7 @@ vint16m4_t test_vle16_v_i16m4(const int16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vle16_v_i16m8(const int16_t *base, size_t vl) {
@@ -125,7 +125,7 @@ vint16m8_t test_vle16_v_i16m8(const int16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vle32_v_i32mf2(const int32_t *base, size_t vl) {
@@ -134,7 +134,7 @@ vint32mf2_t test_vle32_v_i32mf2(const int32_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vle32_v_i32m1(const int32_t *base, size_t vl) {
@@ -143,7 +143,7 @@ vint32m1_t test_vle32_v_i32m1(const int32_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vle32_v_i32m2(const int32_t *base, size_t vl) {
@@ -152,7 +152,7 @@ vint32m2_t test_vle32_v_i32m2(const int32_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vle32_v_i32m4(const int32_t *base, size_t vl) {
@@ -161,7 +161,7 @@ vint32m4_t test_vle32_v_i32m4(const int32_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vle32_v_i32m8(const int32_t *base, size_t vl) {
@@ -170,7 +170,7 @@ vint32m8_t test_vle32_v_i32m8(const int32_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vle64_v_i64m1(const int64_t *base, size_t vl) {
@@ -179,7 +179,7 @@ vint64m1_t test_vle64_v_i64m1(const int64_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vle64_v_i64m2(const int64_t *base, size_t vl) {
@@ -188,7 +188,7 @@ vint64m2_t test_vle64_v_i64m2(const int64_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vle64_v_i64m4(const int64_t *base, size_t vl) {
@@ -197,7 +197,7 @@ vint64m4_t test_vle64_v_i64m4(const int64_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vle64_v_i64m8(const int64_t *base, size_t vl) {
@@ -206,7 +206,7 @@ vint64m8_t test_vle64_v_i64m8(const int64_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vle8_v_u8mf8(const uint8_t *base, size_t vl) {
@@ -215,7 +215,7 @@ vuint8mf8_t test_vle8_v_u8mf8(const uint8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vle8_v_u8mf4(const uint8_t *base, size_t vl) {
@@ -224,7 +224,7 @@ vuint8mf4_t test_vle8_v_u8mf4(const uint8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vle8_v_u8mf2(const uint8_t *base, size_t vl) {
@@ -233,7 +233,7 @@ vuint8mf2_t test_vle8_v_u8mf2(const uint8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vle8_v_u8m1(const uint8_t *base, size_t vl) {
@@ -242,7 +242,7 @@ vuint8m1_t test_vle8_v_u8m1(const uint8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vle8_v_u8m2(const uint8_t *base, size_t vl) {
@@ -251,7 +251,7 @@ vuint8m2_t test_vle8_v_u8m2(const uint8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vle8_v_u8m4(const uint8_t *base, size_t vl) {
@@ -260,7 +260,7 @@ vuint8m4_t test_vle8_v_u8m4(const uint8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vle8_v_u8m8(const uint8_t *base, size_t vl) {
@@ -269,7 +269,7 @@ vuint8m8_t test_vle8_v_u8m8(const uint8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vle16_v_u16mf4(const uint16_t *base, size_t vl) {
@@ -278,7 +278,7 @@ vuint16mf4_t test_vle16_v_u16mf4(const uint16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vle16_v_u16mf2(const uint16_t *base, size_t vl) {
@@ -287,7 +287,7 @@ vuint16mf2_t test_vle16_v_u16mf2(const uint16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vle16_v_u16m1(const uint16_t *base, size_t vl) {
@@ -296,7 +296,7 @@ vuint16m1_t test_vle16_v_u16m1(const uint16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vle16_v_u16m2(const uint16_t *base, size_t vl) {
@@ -305,7 +305,7 @@ vuint16m2_t test_vle16_v_u16m2(const uint16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vle16_v_u16m4(const uint16_t *base, size_t vl) {
@@ -314,7 +314,7 @@ vuint16m4_t test_vle16_v_u16m4(const uint16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vle16_v_u16m8(const uint16_t *base, size_t vl) {
@@ -323,7 +323,7 @@ vuint16m8_t test_vle16_v_u16m8(const uint16_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vle32_v_u32mf2(const uint32_t *base, size_t vl) {
@@ -332,7 +332,7 @@ vuint32mf2_t test_vle32_v_u32mf2(const uint32_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vle32_v_u32m1(const uint32_t *base, size_t vl) {
@@ -341,7 +341,7 @@ vuint32m1_t test_vle32_v_u32m1(const uint32_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vle32_v_u32m2(const uint32_t *base, size_t vl) {
@@ -350,7 +350,7 @@ vuint32m2_t test_vle32_v_u32m2(const uint32_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vle32_v_u32m4(const uint32_t *base, size_t vl) {
@@ -359,7 +359,7 @@ vuint32m4_t test_vle32_v_u32m4(const uint32_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vle32_v_u32m8(const uint32_t *base, size_t vl) {
@@ -368,7 +368,7 @@ vuint32m8_t test_vle32_v_u32m8(const uint32_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vle64_v_u64m1(const uint64_t *base, size_t vl) {
@@ -377,7 +377,7 @@ vuint64m1_t test_vle64_v_u64m1(const uint64_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vle64_v_u64m2(const uint64_t *base, size_t vl) {
@@ -386,7 +386,7 @@ vuint64m2_t test_vle64_v_u64m2(const uint64_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vle64_v_u64m4(const uint64_t *base, size_t vl) {
@@ -395,7 +395,7 @@ vuint64m4_t test_vle64_v_u64m4(const uint64_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vle64_v_u64m8(const uint64_t *base, size_t vl) {
@@ -404,7 +404,7 @@ vuint64m8_t test_vle64_v_u64m8(const uint64_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vle32_v_f32mf2(const float *base, size_t vl) {
@@ -413,7 +413,7 @@ vfloat32mf2_t test_vle32_v_f32mf2(const float *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vle32_v_f32m1(const float *base, size_t vl) {
@@ -422,7 +422,7 @@ vfloat32m1_t test_vle32_v_f32m1(const float *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vle32_v_f32m2(const float *base, size_t vl) {
@@ -431,7 +431,7 @@ vfloat32m2_t test_vle32_v_f32m2(const float *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vle32_v_f32m4(const float *base, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32m4_t test_vle32_v_f32m4(const float *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vle32_v_f32m8(const float *base, size_t vl) {
@@ -449,7 +449,7 @@ vfloat32m8_t test_vle32_v_f32m8(const float *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vle64_v_f64m1(const double *base, size_t vl) {
@@ -458,7 +458,7 @@ vfloat64m1_t test_vle64_v_f64m1(const double *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vle64_v_f64m2(const double *base, size_t vl) {
@@ -467,7 +467,7 @@ vfloat64m2_t test_vle64_v_f64m2(const double *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vle64_v_f64m4(const double *base, size_t vl) {
@@ -476,7 +476,7 @@ vfloat64m4_t test_vle64_v_f64m4(const double *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vle64_v_f64m8(const double *base, size_t vl) {
@@ -962,7 +962,7 @@ vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const d
 
 // CHECK-RV64-LABEL: @test_vle16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16.i64(<vscale x 1 x half> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16.i64(<vscale x 1 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
 vfloat16mf4_t test_vle16_v_f16mf4(const _Float16 *base, size_t vl) {
@@ -971,7 +971,7 @@ vfloat16mf4_t test_vle16_v_f16mf4(const _Float16 *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16.i64(<vscale x 2 x half> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16.i64(<vscale x 2 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
 vfloat16mf2_t test_vle16_v_f16mf2(const _Float16 *base, size_t vl) {
@@ -980,7 +980,7 @@ vfloat16mf2_t test_vle16_v_f16mf2(const _Float16 *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16.i64(<vscale x 4 x half> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
 vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) {
@@ -989,7 +989,7 @@ vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16.i64(<vscale x 8 x half> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16.i64(<vscale x 8 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
 vfloat16m2_t test_vle16_v_f16m2(const _Float16 *base, size_t vl) {
@@ -998,7 +998,7 @@ vfloat16m2_t test_vle16_v_f16m2(const _Float16 *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16.i64(<vscale x 16 x half> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16.i64(<vscale x 16 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
 //
 vfloat16m4_t test_vle16_v_f16m4(const _Float16 *base, size_t vl) {
@@ -1007,7 +1007,7 @@ vfloat16m4_t test_vle16_v_f16m4(const _Float16 *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle16_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16.i64(<vscale x 32 x half> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16.i64(<vscale x 32 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vle16_v_f16m8(const _Float16 *base, size_t vl) {
@@ -1150,7 +1150,7 @@ vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t merge, const uint8_t *base, size_t
 
 // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vle8_v_i8mf8_ta(const int8_t *base, size_t vl) {
@@ -1159,7 +1159,7 @@ vint8mf8_t test_vle8_v_i8mf8_ta(const int8_t *base, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vle8_v_u8mf8_ta(const uint8_t *base, size_t vl) {
@@ -1204,7 +1204,7 @@ vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, const uint
 
 // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vle8_v_i8mf8_tama(vbool64_t mask, const int8_t *base, size_t vl) {
@@ -1213,7 +1213,7 @@ vint8mf8_t test_vle8_v_i8mf8_tama(vbool64_t mask, const int8_t *base, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vle8_v_u8mf8_tama(vbool64_t mask, const uint8_t *base, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
index 06b7d1853de8a..94212b015766f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -17,7 +17,7 @@ vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -26,7 +26,7 @@ vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -35,7 +35,7 @@ vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -44,7 +44,7 @@ vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
@@ -53,7 +53,7 @@ vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -62,7 +62,7 @@ vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -71,7 +71,7 @@ vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -80,7 +80,7 @@ vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -89,7 +89,7 @@ vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -98,7 +98,7 @@ vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -107,7 +107,7 @@ vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -116,7 +116,7 @@ vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -125,7 +125,7 @@ vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -134,7 +134,7 @@ vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -143,7 +143,7 @@ vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -152,7 +152,7 @@ vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -161,7 +161,7 @@ vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -170,7 +170,7 @@ vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -179,7 +179,7 @@ vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -188,7 +188,7 @@ vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -197,7 +197,7 @@ vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
@@ -206,7 +206,7 @@ vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -215,7 +215,7 @@ vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -224,7 +224,7 @@ vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -233,7 +233,7 @@ vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -242,7 +242,7 @@ vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -251,7 +251,7 @@ vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -260,7 +260,7 @@ vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -269,7 +269,7 @@ vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -278,7 +278,7 @@ vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -287,7 +287,7 @@ vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -296,7 +296,7 @@ vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -305,7 +305,7 @@ vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -314,7 +314,7 @@ vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -323,7 +323,7 @@ vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -332,7 +332,7 @@ vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -341,7 +341,7 @@ vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -350,7 +350,7 @@ vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -359,7 +359,7 @@ vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -368,7 +368,7 @@ vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -377,7 +377,7 @@ vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -386,7 +386,7 @@ vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -395,7 +395,7 @@ vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -404,7 +404,7 @@ vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -413,7 +413,7 @@ vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -422,7 +422,7 @@ vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -431,7 +431,7 @@ vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -440,7 +440,7 @@ vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -449,7 +449,7 @@ vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -458,7 +458,7 @@ vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -467,7 +467,7 @@ vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -476,7 +476,7 @@ vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -485,7 +485,7 @@ vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -494,7 +494,7 @@ vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -503,7 +503,7 @@ vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -512,7 +512,7 @@ vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
@@ -521,7 +521,7 @@ vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -530,7 +530,7 @@ vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -539,7 +539,7 @@ vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -548,7 +548,7 @@ vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -557,7 +557,7 @@ vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -566,7 +566,7 @@ vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -575,7 +575,7 @@ vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -584,7 +584,7 @@ vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -593,7 +593,7 @@ vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -602,7 +602,7 @@ vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -611,7 +611,7 @@ vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -620,7 +620,7 @@ vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -629,7 +629,7 @@ vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -638,7 +638,7 @@ vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -647,7 +647,7 @@ vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -656,7 +656,7 @@ vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -665,7 +665,7 @@ vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -674,7 +674,7 @@ vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -683,7 +683,7 @@ vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -692,7 +692,7 @@ vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -701,7 +701,7 @@ vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -710,7 +710,7 @@ vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -719,7 +719,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -728,7 +728,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -737,7 +737,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -746,7 +746,7 @@ vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
@@ -755,7 +755,7 @@ vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -764,7 +764,7 @@ vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -773,7 +773,7 @@ vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -782,7 +782,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -791,7 +791,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -800,7 +800,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -809,7 +809,7 @@ vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -818,7 +818,7 @@ vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -827,7 +827,7 @@ vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -836,7 +836,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -845,7 +845,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -854,7 +854,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -863,7 +863,7 @@ vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -872,7 +872,7 @@ vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -881,7 +881,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -890,7 +890,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -899,7 +899,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
@@ -908,7 +908,7 @@ vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -917,7 +917,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -926,7 +926,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -935,7 +935,7 @@ vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -944,7 +944,7 @@ vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -953,7 +953,7 @@ vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -962,7 +962,7 @@ vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -971,7 +971,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -980,7 +980,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -989,7 +989,7 @@ vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -998,7 +998,7 @@ vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -1007,7 +1007,7 @@ vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -1016,7 +1016,7 @@ vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1025,7 +1025,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1034,7 +1034,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1043,7 +1043,7 @@ vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1052,7 +1052,7 @@ vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -1061,7 +1061,7 @@ vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1070,7 +1070,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1079,7 +1079,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1088,7 +1088,7 @@ vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1097,7 +1097,7 @@ vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1106,7 +1106,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1115,7 +1115,7 @@ vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1124,7 +1124,7 @@ vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1133,7 +1133,7 @@ vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -1142,7 +1142,7 @@ vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1151,7 +1151,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1160,7 +1160,7 @@ vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1169,7 +1169,7 @@ vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1178,7 +1178,7 @@ vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -1187,7 +1187,7 @@ vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1196,7 +1196,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1205,7 +1205,7 @@ vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1214,7 +1214,7 @@ vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1223,7 +1223,7 @@ vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -1232,7 +1232,7 @@ vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1241,7 +1241,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1250,7 +1250,7 @@ vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1259,7 +1259,7 @@ vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1268,7 +1268,7 @@ vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1277,7 +1277,7 @@ vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1286,7 +1286,7 @@ vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1295,7 +1295,7 @@ vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1304,7 +1304,7 @@ vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1313,7 +1313,7 @@ vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1322,7 +1322,7 @@ vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1331,7 +1331,7 @@ vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1340,7 +1340,7 @@ vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1349,7 +1349,7 @@ vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1358,7 +1358,7 @@ vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1367,7 +1367,7 @@ vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1376,7 +1376,7 @@ vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1385,7 +1385,7 @@ vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1394,7 +1394,7 @@ vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1403,7 +1403,7 @@ vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1412,7 +1412,7 @@ vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
@@ -1421,7 +1421,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
@@ -1430,7 +1430,7 @@ vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
@@ -1439,7 +1439,7 @@ vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
@@ -1448,7 +1448,7 @@ vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
@@ -1457,7 +1457,7 @@ vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
@@ -1466,7 +1466,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
@@ -1475,7 +1475,7 @@ vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
@@ -1484,7 +1484,7 @@ vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
@@ -1493,7 +1493,7 @@ vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
@@ -1502,7 +1502,7 @@ vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
@@ -1511,7 +1511,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
@@ -1520,7 +1520,7 @@ vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
@@ -1529,7 +1529,7 @@ vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
@@ -1538,7 +1538,7 @@ vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
@@ -1547,7 +1547,7 @@ vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
@@ -1556,7 +1556,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
@@ -1565,7 +1565,7 @@ vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
@@ -1574,7 +1574,7 @@ vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
@@ -1583,7 +1583,7 @@ vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
@@ -1592,7 +1592,7 @@ vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
@@ -1601,7 +1601,7 @@ vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
@@ -1610,7 +1610,7 @@ vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
@@ -1619,7 +1619,7 @@ vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
@@ -1628,7 +1628,7 @@ vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
@@ -1637,7 +1637,7 @@ vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
@@ -1646,7 +1646,7 @@ vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
@@ -1655,7 +1655,7 @@ vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
@@ -1664,7 +1664,7 @@ vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
@@ -1673,7 +1673,7 @@ vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
@@ -1682,7 +1682,7 @@ vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
@@ -1691,7 +1691,7 @@ vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
@@ -1700,7 +1700,7 @@ vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
@@ -1709,7 +1709,7 @@ vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
@@ -1718,7 +1718,7 @@ vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
@@ -3446,7 +3446,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, cons
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
 vfloat16mf4_t test_vloxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
@@ -3455,7 +3455,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
 vfloat16mf2_t test_vloxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
@@ -3464,7 +3464,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
 vfloat16m1_t test_vloxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
@@ -3473,7 +3473,7 @@ vfloat16m1_t test_vloxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
 vfloat16m2_t test_vloxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) {
@@ -3482,7 +3482,7 @@ vfloat16m2_t test_vloxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
 //
 vfloat16m4_t test_vloxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) {
@@ -3491,7 +3491,7 @@ vfloat16m4_t test_vloxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei8_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vloxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) {
@@ -3500,7 +3500,7 @@ vfloat16m8_t test_vloxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
 vfloat16mf4_t test_vloxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
@@ -3509,7 +3509,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex,
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
 vfloat16mf2_t test_vloxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
@@ -3518,7 +3518,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex,
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
 vfloat16m1_t test_vloxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) {
@@ -3527,7 +3527,7 @@ vfloat16m1_t test_vloxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
 vfloat16m2_t test_vloxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) {
@@ -3536,7 +3536,7 @@ vfloat16m2_t test_vloxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
 //
 vfloat16m4_t test_vloxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) {
@@ -3545,7 +3545,7 @@ vfloat16m4_t test_vloxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei16_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vloxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) {
@@ -3554,7 +3554,7 @@ vfloat16m8_t test_vloxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
 vfloat16mf4_t test_vloxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
@@ -3563,7 +3563,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex,
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
 vfloat16mf2_t test_vloxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) {
@@ -3572,7 +3572,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
 vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
@@ -3581,7 +3581,7 @@ vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
 vfloat16m2_t test_vloxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) {
@@ -3590,7 +3590,7 @@ vfloat16m2_t test_vloxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
 //
 vfloat16m4_t test_vloxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) {
@@ -3599,7 +3599,7 @@ vfloat16m4_t test_vloxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
 vfloat16mf4_t test_vloxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) {
@@ -3608,7 +3608,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
 vfloat16mf2_t test_vloxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) {
@@ -3617,7 +3617,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
 vfloat16m1_t test_vloxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) {
@@ -3626,7 +3626,7 @@ vfloat16m1_t test_vloxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
 vfloat16m2_t test_vloxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) {
@@ -3851,7 +3851,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t merge, const float *base,
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei32_v_i32mf2_ta(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3860,7 +3860,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_ta(const int32_t *base, vuint32mf2_t bindex,
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei32_v_u32mf2_ta(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3869,7 +3869,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_ta(const uint32_t *base, vuint32mf2_t bindex
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei32_v_f32mf2_ta(const float *base, vuint32mf2_t bindex, size_t vl) {
@@ -3932,7 +3932,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, c
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei32_v_i32mf2_tama(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3941,7 +3941,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_tama(vbool64_t mask, const int32_t *base, vui
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vloxei32_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3950,7 +3950,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, v
 
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vloxei32_v_f32mf2_tama(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
index 004d1e9aa1e8e..37fc336b118cd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vlse8_v_i8mf8(const int8_t *base, ptrdiff_t bstride,
@@ -18,7 +18,7 @@ vint8mf8_t test_vlse8_v_i8mf8(const int8_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vlse8_v_i8mf4(const int8_t *base, ptrdiff_t bstride,
@@ -28,7 +28,7 @@ vint8mf4_t test_vlse8_v_i8mf4(const int8_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vlse8_v_i8mf2(const int8_t *base, ptrdiff_t bstride,
@@ -38,7 +38,7 @@ vint8mf2_t test_vlse8_v_i8mf2(const int8_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vlse8_v_i8m1(const int8_t *base, ptrdiff_t bstride, size_t vl) {
@@ -47,7 +47,7 @@ vint8m1_t test_vlse8_v_i8m1(const int8_t *base, ptrdiff_t bstride, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vlse8_v_i8m2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
@@ -56,7 +56,7 @@ vint8m2_t test_vlse8_v_i8m2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vlse8_v_i8m4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
@@ -65,7 +65,7 @@ vint8m4_t test_vlse8_v_i8m4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vlse8_v_i8m8(const int8_t *base, ptrdiff_t bstride, size_t vl) {
@@ -74,7 +74,7 @@ vint8m8_t test_vlse8_v_i8m8(const int8_t *base, ptrdiff_t bstride, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vlse16_v_i16mf4(const int16_t *base, ptrdiff_t bstride,
@@ -84,7 +84,7 @@ vint16mf4_t test_vlse16_v_i16mf4(const int16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vlse16_v_i16mf2(const int16_t *base, ptrdiff_t bstride,
@@ -94,7 +94,7 @@ vint16mf2_t test_vlse16_v_i16mf2(const int16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vlse16_v_i16m1(const int16_t *base, ptrdiff_t bstride,
@@ -104,7 +104,7 @@ vint16m1_t test_vlse16_v_i16m1(const int16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vlse16_v_i16m2(const int16_t *base, ptrdiff_t bstride,
@@ -114,7 +114,7 @@ vint16m2_t test_vlse16_v_i16m2(const int16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vlse16_v_i16m4(const int16_t *base, ptrdiff_t bstride,
@@ -124,7 +124,7 @@ vint16m4_t test_vlse16_v_i16m4(const int16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vlse16_v_i16m8(const int16_t *base, ptrdiff_t bstride,
@@ -134,7 +134,7 @@ vint16m8_t test_vlse16_v_i16m8(const int16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vlse32_v_i32mf2(const int32_t *base, ptrdiff_t bstride,
@@ -144,7 +144,7 @@ vint32mf2_t test_vlse32_v_i32mf2(const int32_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vlse32_v_i32m1(const int32_t *base, ptrdiff_t bstride,
@@ -154,7 +154,7 @@ vint32m1_t test_vlse32_v_i32m1(const int32_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vlse32_v_i32m2(const int32_t *base, ptrdiff_t bstride,
@@ -164,7 +164,7 @@ vint32m2_t test_vlse32_v_i32m2(const int32_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vlse32_v_i32m4(const int32_t *base, ptrdiff_t bstride,
@@ -174,7 +174,7 @@ vint32m4_t test_vlse32_v_i32m4(const int32_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vlse32_v_i32m8(const int32_t *base, ptrdiff_t bstride,
@@ -184,7 +184,7 @@ vint32m8_t test_vlse32_v_i32m8(const int32_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vlse64_v_i64m1(const int64_t *base, ptrdiff_t bstride,
@@ -194,7 +194,7 @@ vint64m1_t test_vlse64_v_i64m1(const int64_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vlse64_v_i64m2(const int64_t *base, ptrdiff_t bstride,
@@ -204,7 +204,7 @@ vint64m2_t test_vlse64_v_i64m2(const int64_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vlse64_v_i64m4(const int64_t *base, ptrdiff_t bstride,
@@ -214,7 +214,7 @@ vint64m4_t test_vlse64_v_i64m4(const int64_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vlse64_v_i64m8(const int64_t *base, ptrdiff_t bstride,
@@ -224,7 +224,7 @@ vint64m8_t test_vlse64_v_i64m8(const int64_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vlse8_v_u8mf8(const uint8_t *base, ptrdiff_t bstride,
@@ -234,7 +234,7 @@ vuint8mf8_t test_vlse8_v_u8mf8(const uint8_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vlse8_v_u8mf4(const uint8_t *base, ptrdiff_t bstride,
@@ -244,7 +244,7 @@ vuint8mf4_t test_vlse8_v_u8mf4(const uint8_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vlse8_v_u8mf2(const uint8_t *base, ptrdiff_t bstride,
@@ -254,7 +254,7 @@ vuint8mf2_t test_vlse8_v_u8mf2(const uint8_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vlse8_v_u8m1(const uint8_t *base, ptrdiff_t bstride,
@@ -264,7 +264,7 @@ vuint8m1_t test_vlse8_v_u8m1(const uint8_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vlse8_v_u8m2(const uint8_t *base, ptrdiff_t bstride,
@@ -274,7 +274,7 @@ vuint8m2_t test_vlse8_v_u8m2(const uint8_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vlse8_v_u8m4(const uint8_t *base, ptrdiff_t bstride,
@@ -284,7 +284,7 @@ vuint8m4_t test_vlse8_v_u8m4(const uint8_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vlse8_v_u8m8(const uint8_t *base, ptrdiff_t bstride,
@@ -294,7 +294,7 @@ vuint8m8_t test_vlse8_v_u8m8(const uint8_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vlse16_v_u16mf4(const uint16_t *base, ptrdiff_t bstride,
@@ -304,7 +304,7 @@ vuint16mf4_t test_vlse16_v_u16mf4(const uint16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vlse16_v_u16mf2(const uint16_t *base, ptrdiff_t bstride,
@@ -314,7 +314,7 @@ vuint16mf2_t test_vlse16_v_u16mf2(const uint16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vlse16_v_u16m1(const uint16_t *base, ptrdiff_t bstride,
@@ -324,7 +324,7 @@ vuint16m1_t test_vlse16_v_u16m1(const uint16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vlse16_v_u16m2(const uint16_t *base, ptrdiff_t bstride,
@@ -334,7 +334,7 @@ vuint16m2_t test_vlse16_v_u16m2(const uint16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vlse16_v_u16m4(const uint16_t *base, ptrdiff_t bstride,
@@ -344,7 +344,7 @@ vuint16m4_t test_vlse16_v_u16m4(const uint16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vlse16_v_u16m8(const uint16_t *base, ptrdiff_t bstride,
@@ -354,7 +354,7 @@ vuint16m8_t test_vlse16_v_u16m8(const uint16_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vlse32_v_u32mf2(const uint32_t *base, ptrdiff_t bstride,
@@ -364,7 +364,7 @@ vuint32mf2_t test_vlse32_v_u32mf2(const uint32_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vlse32_v_u32m1(const uint32_t *base, ptrdiff_t bstride,
@@ -374,7 +374,7 @@ vuint32m1_t test_vlse32_v_u32m1(const uint32_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vlse32_v_u32m2(const uint32_t *base, ptrdiff_t bstride,
@@ -384,7 +384,7 @@ vuint32m2_t test_vlse32_v_u32m2(const uint32_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vlse32_v_u32m4(const uint32_t *base, ptrdiff_t bstride,
@@ -394,7 +394,7 @@ vuint32m4_t test_vlse32_v_u32m4(const uint32_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vlse32_v_u32m8(const uint32_t *base, ptrdiff_t bstride,
@@ -404,7 +404,7 @@ vuint32m8_t test_vlse32_v_u32m8(const uint32_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vlse64_v_u64m1(const uint64_t *base, ptrdiff_t bstride,
@@ -414,7 +414,7 @@ vuint64m1_t test_vlse64_v_u64m1(const uint64_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vlse64_v_u64m2(const uint64_t *base, ptrdiff_t bstride,
@@ -424,7 +424,7 @@ vuint64m2_t test_vlse64_v_u64m2(const uint64_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vlse64_v_u64m4(const uint64_t *base, ptrdiff_t bstride,
@@ -434,7 +434,7 @@ vuint64m4_t test_vlse64_v_u64m4(const uint64_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vlse64_v_u64m8(const uint64_t *base, ptrdiff_t bstride,
@@ -444,7 +444,7 @@ vuint64m8_t test_vlse64_v_u64m8(const uint64_t *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vlse32_v_f32mf2(const float *base, ptrdiff_t bstride,
@@ -454,7 +454,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2(const float *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vlse32_v_f32m1(const float *base, ptrdiff_t bstride,
@@ -464,7 +464,7 @@ vfloat32m1_t test_vlse32_v_f32m1(const float *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vlse32_v_f32m2(const float *base, ptrdiff_t bstride,
@@ -474,7 +474,7 @@ vfloat32m2_t test_vlse32_v_f32m2(const float *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vlse32_v_f32m4(const float *base, ptrdiff_t bstride,
@@ -484,7 +484,7 @@ vfloat32m4_t test_vlse32_v_f32m4(const float *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vlse32_v_f32m8(const float *base, ptrdiff_t bstride,
@@ -494,7 +494,7 @@ vfloat32m8_t test_vlse32_v_f32m8(const float *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vlse64_v_f64m1(const double *base, ptrdiff_t bstride,
@@ -504,7 +504,7 @@ vfloat64m1_t test_vlse64_v_f64m1(const double *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vlse64_v_f64m2(const double *base, ptrdiff_t bstride,
@@ -514,7 +514,7 @@ vfloat64m2_t test_vlse64_v_f64m2(const double *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vlse64_v_f64m4(const double *base, ptrdiff_t bstride,
@@ -524,7 +524,7 @@ vfloat64m4_t test_vlse64_v_f64m4(const double *base, ptrdiff_t bstride,
 
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vlse64_v_f64m8(const double *base, ptrdiff_t bstride,
@@ -1117,7 +1117,7 @@ vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vlse16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16.i64(<vscale x 1 x half> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16.i64(<vscale x 1 x half> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
 vfloat16mf4_t test_vlse16_v_f16mf4(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
@@ -1126,7 +1126,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4(const _Float16 *base, ptrdiff_t bstride, size
 
 // CHECK-RV64-LABEL: @test_vlse16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16.i64(<vscale x 2 x half> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16.i64(<vscale x 2 x half> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
 vfloat16mf2_t test_vlse16_v_f16mf2(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
@@ -1135,7 +1135,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2(const _Float16 *base, ptrdiff_t bstride, size
 
 // CHECK-RV64-LABEL: @test_vlse16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16.i64(<vscale x 4 x half> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
 vfloat16m1_t test_vlse16_v_f16m1(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
@@ -1144,7 +1144,7 @@ vfloat16m1_t test_vlse16_v_f16m1(const _Float16 *base, ptrdiff_t bstride, size_t
 
 // CHECK-RV64-LABEL: @test_vlse16_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16.i64(<vscale x 8 x half> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16.i64(<vscale x 8 x half> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
 vfloat16m2_t test_vlse16_v_f16m2(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
@@ -1153,7 +1153,7 @@ vfloat16m2_t test_vlse16_v_f16m2(const _Float16 *base, ptrdiff_t bstride, size_t
 
 // CHECK-RV64-LABEL: @test_vlse16_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16.i64(<vscale x 16 x half> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16.i64(<vscale x 16 x half> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
 //
 vfloat16m4_t test_vlse16_v_f16m4(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
@@ -1162,7 +1162,7 @@ vfloat16m4_t test_vlse16_v_f16m4(const _Float16 *base, ptrdiff_t bstride, size_t
 
 // CHECK-RV64-LABEL: @test_vlse16_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16.i64(<vscale x 32 x half> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16.i64(<vscale x 32 x half> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vlse16_v_f16m8(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
@@ -1252,7 +1252,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t merge, const float *base, pt
 
 // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vlse32_v_i32mf2_ta(const int32_t *base, ptrdiff_t bstride, size_t vl) {
@@ -1261,7 +1261,7 @@ vint32mf2_t test_vlse32_v_i32mf2_ta(const int32_t *base, ptrdiff_t bstride, size
 
 // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vlse32_v_u32mf2_ta(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
@@ -1270,7 +1270,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_ta(const uint32_t *base, ptrdiff_t bstride, si
 
 // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vlse32_v_f32mf2_ta(const float *base, ptrdiff_t bstride, size_t vl) {
@@ -1333,7 +1333,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, con
 
 // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vlse32_v_i32mf2_tama(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
@@ -1342,7 +1342,7 @@ vint32mf2_t test_vlse32_v_i32mf2_tama(vbool64_t mask, const int32_t *base, ptrdi
 
 // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vlse32_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
@@ -1351,7 +1351,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, ptr
 
 // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vlse32_v_f32mf2_tama(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
index 3305752ce7db6..8fc4a585eaab6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -17,7 +17,7 @@ vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -26,7 +26,7 @@ vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -35,7 +35,7 @@ vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -44,7 +44,7 @@ vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
@@ -53,7 +53,7 @@ vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -62,7 +62,7 @@ vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -71,7 +71,7 @@ vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -80,7 +80,7 @@ vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -89,7 +89,7 @@ vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -98,7 +98,7 @@ vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -107,7 +107,7 @@ vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -116,7 +116,7 @@ vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -125,7 +125,7 @@ vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -134,7 +134,7 @@ vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -143,7 +143,7 @@ vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -152,7 +152,7 @@ vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -161,7 +161,7 @@ vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -170,7 +170,7 @@ vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -179,7 +179,7 @@ vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -188,7 +188,7 @@ vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -197,7 +197,7 @@ vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
@@ -206,7 +206,7 @@ vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -215,7 +215,7 @@ vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -224,7 +224,7 @@ vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -233,7 +233,7 @@ vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -242,7 +242,7 @@ vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -251,7 +251,7 @@ vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -260,7 +260,7 @@ vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -269,7 +269,7 @@ vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -278,7 +278,7 @@ vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -287,7 +287,7 @@ vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -296,7 +296,7 @@ vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -305,7 +305,7 @@ vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -314,7 +314,7 @@ vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -323,7 +323,7 @@ vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -332,7 +332,7 @@ vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -341,7 +341,7 @@ vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -350,7 +350,7 @@ vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -359,7 +359,7 @@ vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -368,7 +368,7 @@ vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -377,7 +377,7 @@ vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -386,7 +386,7 @@ vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -395,7 +395,7 @@ vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -404,7 +404,7 @@ vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -413,7 +413,7 @@ vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -422,7 +422,7 @@ vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -431,7 +431,7 @@ vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -440,7 +440,7 @@ vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -449,7 +449,7 @@ vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -458,7 +458,7 @@ vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -467,7 +467,7 @@ vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -476,7 +476,7 @@ vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -485,7 +485,7 @@ vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -494,7 +494,7 @@ vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -503,7 +503,7 @@ vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -512,7 +512,7 @@ vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
@@ -521,7 +521,7 @@ vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -530,7 +530,7 @@ vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -539,7 +539,7 @@ vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -548,7 +548,7 @@ vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -557,7 +557,7 @@ vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -566,7 +566,7 @@ vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -575,7 +575,7 @@ vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -584,7 +584,7 @@ vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -593,7 +593,7 @@ vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -602,7 +602,7 @@ vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -611,7 +611,7 @@ vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -620,7 +620,7 @@ vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -629,7 +629,7 @@ vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -638,7 +638,7 @@ vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -647,7 +647,7 @@ vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -656,7 +656,7 @@ vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -665,7 +665,7 @@ vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -674,7 +674,7 @@ vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -683,7 +683,7 @@ vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -692,7 +692,7 @@ vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -701,7 +701,7 @@ vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -710,7 +710,7 @@ vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -719,7 +719,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -728,7 +728,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -737,7 +737,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -746,7 +746,7 @@ vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
@@ -755,7 +755,7 @@ vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -764,7 +764,7 @@ vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, ptr [[BASE:%.*]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -773,7 +773,7 @@ vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -782,7 +782,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -791,7 +791,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -800,7 +800,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -809,7 +809,7 @@ vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -818,7 +818,7 @@ vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -827,7 +827,7 @@ vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -836,7 +836,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -845,7 +845,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -854,7 +854,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -863,7 +863,7 @@ vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -872,7 +872,7 @@ vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -881,7 +881,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -890,7 +890,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -899,7 +899,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
@@ -908,7 +908,7 @@ vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -917,7 +917,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -926,7 +926,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -935,7 +935,7 @@ vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -944,7 +944,7 @@ vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -953,7 +953,7 @@ vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -962,7 +962,7 @@ vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -971,7 +971,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -980,7 +980,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -989,7 +989,7 @@ vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -998,7 +998,7 @@ vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -1007,7 +1007,7 @@ vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -1016,7 +1016,7 @@ vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1025,7 +1025,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1034,7 +1034,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1043,7 +1043,7 @@ vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1052,7 +1052,7 @@ vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -1061,7 +1061,7 @@ vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1070,7 +1070,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1079,7 +1079,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1088,7 +1088,7 @@ vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1097,7 +1097,7 @@ vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1106,7 +1106,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1115,7 +1115,7 @@ vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1124,7 +1124,7 @@ vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1133,7 +1133,7 @@ vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -1142,7 +1142,7 @@ vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1151,7 +1151,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1160,7 +1160,7 @@ vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1169,7 +1169,7 @@ vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1178,7 +1178,7 @@ vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -1187,7 +1187,7 @@ vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1196,7 +1196,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1205,7 +1205,7 @@ vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1214,7 +1214,7 @@ vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1223,7 +1223,7 @@ vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -1232,7 +1232,7 @@ vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1241,7 +1241,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1250,7 +1250,7 @@ vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1259,7 +1259,7 @@ vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1268,7 +1268,7 @@ vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1277,7 +1277,7 @@ vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1286,7 +1286,7 @@ vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1295,7 +1295,7 @@ vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1304,7 +1304,7 @@ vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1313,7 +1313,7 @@ vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1322,7 +1322,7 @@ vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1331,7 +1331,7 @@ vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1340,7 +1340,7 @@ vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1349,7 +1349,7 @@ vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1358,7 +1358,7 @@ vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1367,7 +1367,7 @@ vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1376,7 +1376,7 @@ vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1385,7 +1385,7 @@ vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1394,7 +1394,7 @@ vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1403,7 +1403,7 @@ vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1412,7 +1412,7 @@ vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
@@ -1421,7 +1421,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
@@ -1430,7 +1430,7 @@ vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
@@ -1439,7 +1439,7 @@ vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
@@ -1448,7 +1448,7 @@ vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
@@ -1457,7 +1457,7 @@ vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
@@ -1466,7 +1466,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
@@ -1475,7 +1475,7 @@ vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
@@ -1484,7 +1484,7 @@ vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
@@ -1493,7 +1493,7 @@ vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
@@ -1502,7 +1502,7 @@ vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
@@ -1511,7 +1511,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
@@ -1520,7 +1520,7 @@ vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
@@ -1529,7 +1529,7 @@ vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
@@ -1538,7 +1538,7 @@ vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
@@ -1547,7 +1547,7 @@ vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
@@ -1556,7 +1556,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
@@ -1565,7 +1565,7 @@ vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
@@ -1574,7 +1574,7 @@ vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
@@ -1583,7 +1583,7 @@ vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
@@ -1592,7 +1592,7 @@ vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
@@ -1601,7 +1601,7 @@ vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
@@ -1610,7 +1610,7 @@ vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
@@ -1619,7 +1619,7 @@ vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
@@ -1628,7 +1628,7 @@ vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
@@ -1637,7 +1637,7 @@ vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
@@ -1646,7 +1646,7 @@ vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
@@ -1655,7 +1655,7 @@ vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
@@ -1664,7 +1664,7 @@ vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
@@ -1673,7 +1673,7 @@ vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
@@ -1682,7 +1682,7 @@ vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
@@ -1691,7 +1691,7 @@ vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
@@ -1700,7 +1700,7 @@ vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
@@ -1709,7 +1709,7 @@ vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
@@ -1718,7 +1718,7 @@ vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
@@ -3446,7 +3446,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, cons
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half> poison, ptr [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
 vfloat16mf4_t test_vluxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
@@ -3455,7 +3455,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half> poison, ptr [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
 vfloat16mf2_t test_vluxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
@@ -3464,7 +3464,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, si
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
 vfloat16m1_t test_vluxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
@@ -3473,7 +3473,7 @@ vfloat16m1_t test_vluxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> poison, ptr [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
 vfloat16m2_t test_vluxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) {
@@ -3482,7 +3482,7 @@ vfloat16m2_t test_vluxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half> poison, ptr [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
 //
 vfloat16m4_t test_vluxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) {
@@ -3491,7 +3491,7 @@ vfloat16m4_t test_vluxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei8_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half> undef, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half> poison, ptr [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vluxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) {
@@ -3500,7 +3500,7 @@ vfloat16m8_t test_vluxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half> poison, ptr [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
 vfloat16mf4_t test_vluxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
@@ -3509,7 +3509,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex,
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half> poison, ptr [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
 vfloat16mf2_t test_vluxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
@@ -3518,7 +3518,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex,
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
 vfloat16m1_t test_vluxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) {
@@ -3527,7 +3527,7 @@ vfloat16m1_t test_vluxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half> poison, ptr [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
 vfloat16m2_t test_vluxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) {
@@ -3536,7 +3536,7 @@ vfloat16m2_t test_vluxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half> poison, ptr [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
 //
 vfloat16m4_t test_vluxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) {
@@ -3545,7 +3545,7 @@ vfloat16m4_t test_vluxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei16_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half> undef, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half> poison, ptr [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vluxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) {
@@ -3554,7 +3554,7 @@ vfloat16m8_t test_vluxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
 vfloat16mf4_t test_vluxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
@@ -3563,7 +3563,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex,
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half> poison, ptr [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
 vfloat16mf2_t test_vluxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) {
@@ -3572,7 +3572,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
 vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
@@ -3581,7 +3581,7 @@ vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half> poison, ptr [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
 vfloat16m2_t test_vluxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) {
@@ -3590,7 +3590,7 @@ vfloat16m2_t test_vluxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half> poison, ptr [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
 //
 vfloat16m4_t test_vluxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) {
@@ -3599,7 +3599,7 @@ vfloat16m4_t test_vluxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half> poison, ptr [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
 vfloat16mf4_t test_vluxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) {
@@ -3608,7 +3608,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half> poison, ptr [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
 vfloat16mf2_t test_vluxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) {
@@ -3617,7 +3617,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, s
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
 vfloat16m1_t test_vluxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) {
@@ -3626,7 +3626,7 @@ vfloat16m1_t test_vluxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, siz
 
 // CHECK-RV64-LABEL: @test_vluxei64_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half> undef, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half> poison, ptr [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
 vfloat16m2_t test_vluxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) {
@@ -3851,7 +3851,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t merge, const float *base,
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei32_v_i32mf2_ta(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3860,7 +3860,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_ta(const int32_t *base, vuint32mf2_t bindex,
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei32_v_u32mf2_ta(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3869,7 +3869,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_ta(const uint32_t *base, vuint32mf2_t bindex
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei32_v_f32mf2_ta(const float *base, vuint32mf2_t bindex, size_t vl) {
@@ -3932,7 +3932,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, c
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vluxei32_v_i32mf2_tama(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3941,7 +3941,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_tama(vbool64_t mask, const int32_t *base, vui
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei32_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3950,7 +3950,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_tama(vbool64_t mask, const uint32_t *base, v
 
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> poison, ptr [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei32_v_f32mf2_tama(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
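
Every hunk in these test updates applies the same mechanical substitution: the
passthrough (merge) operand of the tail-agnostic RVV intrinsic calls changes
from undef to poison, while all other operands and the expected results are
untouched. A minimal before/after sketch of the pattern, using the vmax
intrinsic name and types exactly as they appear in the hunks below (the
%r/%op1/%op2/%vl value names are illustrative stand-ins for the FileCheck
captures):

  ; Before: the unused passthrough was modeled as undef
  %r = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(
           <vscale x 1 x i8> undef, <vscale x 1 x i8> %op1,
           <vscale x 1 x i8> %op2, i64 %vl)

  ; After: poison states that no use may rely on the passthrough value
  %r = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(
           <vscale x 1 x i8> poison, <vscale x 1 x i8> %op1,
           <vscale x 1 x i8> %op2, i64 %vl)

poison is the stronger "don't care" value in LLVM IR: undef may still be
refined to a (possibly different) concrete value at each use, whereas poison
carries no such obligation, so passing poison when no merge value is supplied
gives the optimizer strictly more freedom.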

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
index d1ebf3bb02bca..590483292c27f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vmax_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vmax_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vmax_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmax_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vmax_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

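A note on the substitution itself: in the unmasked and tail-agnostic forms above, the first operand of each @llvm.riscv.* call is the passthru (merge) vector, and its lane values are never observed. Seeding it with poison instead of undef states that outright: undef still promises "some bit pattern" (and each use may see a different one), while poison carries no value at all, which leaves the optimizer maximally free. A minimal before/after sketch, mirroring the vmax pattern above:

  ; before: passthru is undef -- uses may still assume some bit pattern
  %v = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(
           <vscale x 1 x i32> undef, <vscale x 1 x i32> %op1,
           <vscale x 1 x i32> %op2, i64 %vl)

  ; after: passthru is poison -- the tail lanes have no value at all
  %v = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(
           <vscale x 1 x i32> poison, <vscale x 1 x i32> %op1,
           <vscale x 1 x i32> %op2, i64 %vl)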
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
index 97210b833d7ca..b359116f9dc61 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
@@ -581,7 +581,7 @@ vbool2_t test_vmfeq_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
 
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -590,7 +590,7 @@ vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfeq_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
index c7eaa4ed9e12a..8f7bb78693328 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
@@ -548,7 +548,7 @@ vbool2_t test_vmfge_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
 
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -557,7 +557,7 @@ vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfge_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
index 779a529ded314..e6f87789761b2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
@@ -548,7 +548,7 @@ vbool2_t test_vmfgt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
 
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -557,7 +557,7 @@ vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfgt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
index 4f27e9533354b..3e50169aaa6c8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
@@ -581,7 +581,7 @@ vbool2_t test_vmfle_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
 
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -590,7 +590,7 @@ vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfle_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
index bff325f7a600a..006e7817d0ceb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
@@ -581,7 +581,7 @@ vbool2_t test_vmflt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
 
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -590,7 +590,7 @@ vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmflt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
index 844d3abfacb9f..ccc3e9de1742e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
@@ -581,7 +581,7 @@ vbool2_t test_vmfne_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
 
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -590,7 +590,7 @@ vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat
 
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmfne_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {

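The compare intrinsics in the five files above (vmfeq.c through vmfne.c) follow the same shape with one twist: the result is a mask vector, so the first operand is the maskedoff value for the <vscale x N x i1> result rather than a data passthru. Under the mask-agnostic (_ma) policy those lanes are unspecified anyway, so poison is again the natural seed. Sketch, mirroring the vmfeq hunk above:

  %m = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(
           <vscale x 1 x i1> poison, <vscale x 1 x float> %op1,
           <vscale x 1 x float> %op2, <vscale x 1 x i1> %mask, i64 %vl)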
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
index 649cf99716dd7..947c659eae85a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vmin_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vmin_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vmin_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmin_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vmin_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vminu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
index 316c185b1d126..1ffa7a5111fcd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
@@ -139,7 +139,7 @@ vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmsbf_m_b4_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> poison, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
 vbool4_t test_vmsbf_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
index 3afc9803004d1..a094ef99eac80 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
@@ -1702,7 +1702,7 @@ vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1711,7 +1711,7 @@ vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1720,7 +1720,7 @@ vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1729,7 +1729,7 @@ vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmseq_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
index 87d703ef4badd..c83f7688bae2c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c
@@ -1591,7 +1591,7 @@ vbool8_t test_vmsgeu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
 
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1600,7 +1600,7 @@ vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1609,7 +1609,7 @@ vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1618,7 +1618,7 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgeu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
index b8f0ca65f4422..7d3768c6eb4f7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
@@ -1591,7 +1591,7 @@ vbool8_t test_vmsgtu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
 
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1600,7 +1600,7 @@ vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1609,7 +1609,7 @@ vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1618,7 +1618,7 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsgtu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
index 8792ee4609549..5adb636389f74 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
@@ -139,7 +139,7 @@ vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmsif_m_b4_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> poison, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
 vbool4_t test_vmsif_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
index ba1f504d60923..7e7cd099d51f5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
@@ -1717,7 +1717,7 @@ vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1726,7 +1726,7 @@ vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1735,7 +1735,7 @@ vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1744,7 +1744,7 @@ vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsleu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
index 89f3ac0d5a9ea..01cf1cfb4a46c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
@@ -1717,7 +1717,7 @@ vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1726,7 +1726,7 @@ vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1735,7 +1735,7 @@ vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1744,7 +1744,7 @@ vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsltu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
index b061142994ff4..296a5d82cd154 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
@@ -1702,7 +1702,7 @@ vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1711,7 +1711,7 @@ vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf
 
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1720,7 +1720,7 @@ vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1729,7 +1729,7 @@ vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
 vbool64_t test_vmsne_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
index c5d7bd6b22193..8c0e650204948 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
@@ -139,7 +139,7 @@ vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmsof_m_b4_ma(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> poison, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
 vbool4_t test_vmsof_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c
index f1c232917edb4..923250be0c39b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c
@@ -9,7 +9,7 @@
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -18,7 +18,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -27,7 +27,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -36,7 +36,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -45,7 +45,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -54,7 +54,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -63,7 +63,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -72,7 +72,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -81,7 +81,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -90,7 +90,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -99,7 +99,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -108,7 +108,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -117,7 +117,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -126,7 +126,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -135,7 +135,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -144,7 +144,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -153,7 +153,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -162,7 +162,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
@@ -171,7 +171,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -180,7 +180,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
@@ -207,7 +207,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -216,7 +216,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
index 2259b5db135fb..77ccc953cb906 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1590,7 +1590,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1599,7 +1599,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1608,7 +1608,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1617,7 +1617,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1662,7 +1662,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1671,7 +1671,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1680,7 +1680,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1689,7 +1689,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1698,7 +1698,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1716,7 +1716,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1725,7 +1725,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
@@ -3624,7 +3624,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, uint32
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -3633,7 +3633,7 @@ vint32mf2_t test_vmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -3642,7 +3642,7 @@ vint32mf2_t test_vmul_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3651,7 +3651,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -3660,7 +3660,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -3669,7 +3669,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -3678,7 +3678,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3687,7 +3687,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -3696,7 +3696,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3705,7 +3705,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vx_i32mf2_ta(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -3894,7 +3894,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint3
 
 // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -3903,7 +3903,7 @@ vint32mf2_t test_vmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmul_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -3912,7 +3912,7 @@ vint32mf2_t test_vmul_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3921,7 +3921,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmul_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -3930,7 +3930,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t
 
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -3939,7 +3939,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulh_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -3948,7 +3948,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3957,7 +3957,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmulhu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -3966,7 +3966,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -3975,7 +3975,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
index 47fb41a45327e..689ddd6b56c6f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
@@ -17,7 +17,7 @@ vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i64(<vscale x 1 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i64(<vscale x 1 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmv_v_x_i8mf8(int8_t src, size_t vl) {
@@ -26,7 +26,7 @@ vint8mf8_t test_vmv_v_x_i8mf8(int8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
@@ -35,7 +35,7 @@ vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i64(<vscale x 2 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i64(<vscale x 2 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmv_v_x_i8mf4(int8_t src, size_t vl) {
@@ -44,7 +44,7 @@ vint8mf4_t test_vmv_v_x_i8mf4(int8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
@@ -53,7 +53,7 @@ vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i64(<vscale x 4 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i64(<vscale x 4 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmv_v_x_i8mf2(int8_t src, size_t vl) {
@@ -62,7 +62,7 @@ vint8mf2_t test_vmv_v_x_i8mf2(int8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) {
@@ -71,7 +71,7 @@ vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(<vscale x 8 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(<vscale x 8 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmv_v_x_i8m1(int8_t src, size_t vl) {
@@ -80,7 +80,7 @@ vint8m1_t test_vmv_v_x_i8m1(int8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) {
@@ -89,7 +89,7 @@ vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i64(<vscale x 16 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i64(<vscale x 16 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmv_v_x_i8m2(int8_t src, size_t vl) {
@@ -98,7 +98,7 @@ vint8m2_t test_vmv_v_x_i8m2(int8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) {
@@ -107,7 +107,7 @@ vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i64(<vscale x 32 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i64(<vscale x 32 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmv_v_x_i8m4(int8_t src, size_t vl) {
@@ -116,7 +116,7 @@ vint8m4_t test_vmv_v_x_i8m4(int8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) {
@@ -125,7 +125,7 @@ vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i64(<vscale x 64 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i64(<vscale x 64 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmv_v_x_i8m8(int8_t src, size_t vl) {
@@ -134,7 +134,7 @@ vint8m8_t test_vmv_v_x_i8m8(int8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
@@ -143,7 +143,7 @@ vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i64(<vscale x 1 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i64(<vscale x 1 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmv_v_x_i16mf4(int16_t src, size_t vl) {
@@ -152,7 +152,7 @@ vint16mf4_t test_vmv_v_x_i16mf4(int16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
@@ -161,7 +161,7 @@ vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i64(<vscale x 2 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i64(<vscale x 2 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmv_v_x_i16mf2(int16_t src, size_t vl) {
@@ -170,7 +170,7 @@ vint16mf2_t test_vmv_v_x_i16mf2(int16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
@@ -179,7 +179,7 @@ vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i64(<vscale x 4 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i64(<vscale x 4 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmv_v_x_i16m1(int16_t src, size_t vl) {
@@ -188,7 +188,7 @@ vint16m1_t test_vmv_v_x_i16m1(int16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
@@ -197,7 +197,7 @@ vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i64(<vscale x 8 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i64(<vscale x 8 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmv_v_x_i16m2(int16_t src, size_t vl) {
@@ -206,7 +206,7 @@ vint16m2_t test_vmv_v_x_i16m2(int16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
@@ -215,7 +215,7 @@ vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i64(<vscale x 16 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i64(<vscale x 16 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmv_v_x_i16m4(int16_t src, size_t vl) {
@@ -224,7 +224,7 @@ vint16m4_t test_vmv_v_x_i16m4(int16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
@@ -233,7 +233,7 @@ vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i64(<vscale x 32 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i64(<vscale x 32 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmv_v_x_i16m8(int16_t src, size_t vl) {
@@ -242,7 +242,7 @@ vint16m8_t test_vmv_v_x_i16m8(int16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
@@ -251,7 +251,7 @@ vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(<vscale x 1 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(<vscale x 1 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmv_v_x_i32mf2(int32_t src, size_t vl) {
@@ -260,7 +260,7 @@ vint32mf2_t test_vmv_v_x_i32mf2(int32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
@@ -269,7 +269,7 @@ vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(<vscale x 2 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(<vscale x 2 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmv_v_x_i32m1(int32_t src, size_t vl) {
@@ -278,7 +278,7 @@ vint32m1_t test_vmv_v_x_i32m1(int32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
@@ -287,7 +287,7 @@ vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i64(<vscale x 4 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i64(<vscale x 4 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmv_v_x_i32m2(int32_t src, size_t vl) {
@@ -296,7 +296,7 @@ vint32m2_t test_vmv_v_x_i32m2(int32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
@@ -305,7 +305,7 @@ vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i64(<vscale x 8 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmv_v_x_i32m4(int32_t src, size_t vl) {
@@ -314,7 +314,7 @@ vint32m4_t test_vmv_v_x_i32m4(int32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
@@ -323,7 +323,7 @@ vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i64(<vscale x 16 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i64(<vscale x 16 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmv_v_x_i32m8(int32_t src, size_t vl) {
@@ -332,7 +332,7 @@ vint32m8_t test_vmv_v_x_i32m8(int32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
@@ -341,7 +341,7 @@ vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(<vscale x 1 x i64> undef, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(<vscale x 1 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmv_v_x_i64m1(int64_t src, size_t vl) {
@@ -350,7 +350,7 @@ vint64m1_t test_vmv_v_x_i64m1(int64_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
@@ -359,7 +359,7 @@ vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(<vscale x 2 x i64> undef, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(<vscale x 2 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmv_v_x_i64m2(int64_t src, size_t vl) {
@@ -368,7 +368,7 @@ vint64m2_t test_vmv_v_x_i64m2(int64_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
@@ -377,7 +377,7 @@ vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(<vscale x 4 x i64> undef, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(<vscale x 4 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmv_v_x_i64m4(int64_t src, size_t vl) {
@@ -386,7 +386,7 @@ vint64m4_t test_vmv_v_x_i64m4(int64_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
@@ -395,7 +395,7 @@ vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(<vscale x 8 x i64> undef, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(<vscale x 8 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmv_v_x_i64m8(int64_t src, size_t vl) {
@@ -404,7 +404,7 @@ vint64m8_t test_vmv_v_x_i64m8(int64_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
@@ -413,7 +413,7 @@ vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i64(<vscale x 1 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i64(<vscale x 1 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmv_v_x_u8mf8(uint8_t src, size_t vl) {
@@ -422,7 +422,7 @@ vuint8mf8_t test_vmv_v_x_u8mf8(uint8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
@@ -431,7 +431,7 @@ vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i64(<vscale x 2 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i64(<vscale x 2 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmv_v_x_u8mf4(uint8_t src, size_t vl) {
@@ -440,7 +440,7 @@ vuint8mf4_t test_vmv_v_x_u8mf4(uint8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
@@ -449,7 +449,7 @@ vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i64(<vscale x 4 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i64(<vscale x 4 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmv_v_x_u8mf2(uint8_t src, size_t vl) {
@@ -458,7 +458,7 @@ vuint8mf2_t test_vmv_v_x_u8mf2(uint8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
@@ -467,7 +467,7 @@ vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(<vscale x 8 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(<vscale x 8 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmv_v_x_u8m1(uint8_t src, size_t vl) {
@@ -476,7 +476,7 @@ vuint8m1_t test_vmv_v_x_u8m1(uint8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
@@ -485,7 +485,7 @@ vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i64(<vscale x 16 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i64(<vscale x 16 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmv_v_x_u8m2(uint8_t src, size_t vl) {
@@ -494,7 +494,7 @@ vuint8m2_t test_vmv_v_x_u8m2(uint8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
@@ -503,7 +503,7 @@ vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i64(<vscale x 32 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i64(<vscale x 32 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmv_v_x_u8m4(uint8_t src, size_t vl) {
@@ -512,7 +512,7 @@ vuint8m4_t test_vmv_v_x_u8m4(uint8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
@@ -521,7 +521,7 @@ vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i64(<vscale x 64 x i8> undef, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i64(<vscale x 64 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmv_v_x_u8m8(uint8_t src, size_t vl) {
@@ -530,7 +530,7 @@ vuint8m8_t test_vmv_v_x_u8m8(uint8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
@@ -539,7 +539,7 @@ vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i64(<vscale x 1 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i64(<vscale x 1 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmv_v_x_u16mf4(uint16_t src, size_t vl) {
@@ -548,7 +548,7 @@ vuint16mf4_t test_vmv_v_x_u16mf4(uint16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
@@ -557,7 +557,7 @@ vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i64(<vscale x 2 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i64(<vscale x 2 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmv_v_x_u16mf2(uint16_t src, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vmv_v_x_u16mf2(uint16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i64(<vscale x 4 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i64(<vscale x 4 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmv_v_x_u16m1(uint16_t src, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vmv_v_x_u16m1(uint16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i64(<vscale x 8 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i64(<vscale x 8 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmv_v_x_u16m2(uint16_t src, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vmv_v_x_u16m2(uint16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i64(<vscale x 16 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i64(<vscale x 16 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmv_v_x_u16m4(uint16_t src, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vmv_v_x_u16m4(uint16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i64(<vscale x 32 x i16> undef, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i64(<vscale x 32 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmv_v_x_u16m8(uint16_t src, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vmv_v_x_u16m8(uint16_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
@@ -647,7 +647,7 @@ vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(<vscale x 1 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(<vscale x 1 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmv_v_x_u32mf2(uint32_t src, size_t vl) {
@@ -656,7 +656,7 @@ vuint32mf2_t test_vmv_v_x_u32mf2(uint32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
@@ -665,7 +665,7 @@ vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(<vscale x 2 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(<vscale x 2 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmv_v_x_u32m1(uint32_t src, size_t vl) {
@@ -674,7 +674,7 @@ vuint32m1_t test_vmv_v_x_u32m1(uint32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
@@ -683,7 +683,7 @@ vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i64(<vscale x 4 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i64(<vscale x 4 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmv_v_x_u32m2(uint32_t src, size_t vl) {
@@ -692,7 +692,7 @@ vuint32m2_t test_vmv_v_x_u32m2(uint32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
@@ -701,7 +701,7 @@ vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i64(<vscale x 8 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmv_v_x_u32m4(uint32_t src, size_t vl) {
@@ -710,7 +710,7 @@ vuint32m4_t test_vmv_v_x_u32m4(uint32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
@@ -719,7 +719,7 @@ vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i64(<vscale x 16 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i64(<vscale x 16 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmv_v_x_u32m8(uint32_t src, size_t vl) {
@@ -728,7 +728,7 @@ vuint32m8_t test_vmv_v_x_u32m8(uint32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
@@ -737,7 +737,7 @@ vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(<vscale x 1 x i64> undef, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(<vscale x 1 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmv_v_x_u64m1(uint64_t src, size_t vl) {
@@ -746,7 +746,7 @@ vuint64m1_t test_vmv_v_x_u64m1(uint64_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
@@ -755,7 +755,7 @@ vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(<vscale x 2 x i64> undef, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(<vscale x 2 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmv_v_x_u64m2(uint64_t src, size_t vl) {
@@ -764,7 +764,7 @@ vuint64m2_t test_vmv_v_x_u64m2(uint64_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
@@ -773,7 +773,7 @@ vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(<vscale x 4 x i64> undef, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(<vscale x 4 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmv_v_x_u64m4(uint64_t src, size_t vl) {
@@ -782,7 +782,7 @@ vuint64m4_t test_vmv_v_x_u64m4(uint64_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
@@ -791,7 +791,7 @@ vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(<vscale x 8 x i64> undef, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(<vscale x 8 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmv_v_x_u64m8(uint64_t src, size_t vl) {
@@ -800,7 +800,7 @@ vuint64m8_t test_vmv_v_x_u64m8(uint64_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
@@ -809,7 +809,7 @@ vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
@@ -818,7 +818,7 @@ vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
@@ -827,7 +827,7 @@ vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
@@ -836,7 +836,7 @@ vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
@@ -845,7 +845,7 @@ vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
@@ -854,7 +854,7 @@ vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
@@ -863,7 +863,7 @@ vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
@@ -872,7 +872,7 @@ vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
@@ -1645,7 +1645,7 @@ vuint64m8_t test_vmv_s_x_u64m8(vuint64m8_t dst, uint64_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vmv_v_v_f16mf4 (vfloat16mf4_t src, size_t vl) {
@@ -1654,7 +1654,7 @@ vfloat16mf4_t test_vmv_v_v_f16mf4 (vfloat16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vmv_v_v_f16mf2 (vfloat16mf2_t src, size_t vl) {
@@ -1663,7 +1663,7 @@ vfloat16mf2_t test_vmv_v_v_f16mf2 (vfloat16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vmv_v_v_f16m1 (vfloat16m1_t src, size_t vl) {
@@ -1672,7 +1672,7 @@ vfloat16m1_t test_vmv_v_v_f16m1 (vfloat16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vmv_v_v_f16m2 (vfloat16m2_t src, size_t vl) {
@@ -1681,7 +1681,7 @@ vfloat16m2_t test_vmv_v_v_f16m2 (vfloat16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vmv_v_v_f16m4 (vfloat16m4_t src, size_t vl) {
@@ -1690,7 +1690,7 @@ vfloat16m4_t test_vmv_v_v_f16m4 (vfloat16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vmv_v_v_f16m8 (vfloat16m8_t src, size_t vl) {
@@ -1735,7 +1735,7 @@ vuint32mf2_t test_vmv_v_x_u32mf2_tu (vuint32mf2_t merge, uint32_t src, size_t vl
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmv_v_v_i32mf2_ta (vint32mf2_t src, size_t vl) {
@@ -1744,7 +1744,7 @@ vint32mf2_t test_vmv_v_v_i32mf2_ta (vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(<vscale x 1 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(<vscale x 1 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmv_v_x_i32mf2_ta (int32_t src, size_t vl) {
@@ -1753,7 +1753,7 @@ vint32mf2_t test_vmv_v_x_i32mf2_ta (int32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmv_v_v_u32mf2_ta (vuint32mf2_t src, size_t vl) {
@@ -1762,7 +1762,7 @@ vuint32mf2_t test_vmv_v_v_u32mf2_ta (vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(<vscale x 1 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(<vscale x 1 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmv_v_x_u32mf2_ta (uint32_t src, size_t vl) {
@@ -1780,7 +1780,7 @@ vfloat32mf2_t test_vmv_v_v_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t src, si
 
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vmv_v_v_f32mf2_ta (vfloat32mf2_t src, size_t vl) {
@@ -1807,7 +1807,7 @@ vuint32mf2_t test_vmv_s_x_u32mf2_tu (vuint32mf2_t merge, uint32_t src, size_t vl
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmv_s_x_i32mf2_ta (int32_t src, size_t vl) {
@@ -1816,7 +1816,7 @@ vint32mf2_t test_vmv_s_x_i32mf2_ta (int32_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> undef, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmv_s_x_u32mf2_ta (uint32_t src, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
index 806fc1cefb668..37d2d396d6469 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
@@ -124,7 +124,7 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -133,7 +133,7 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
@@ -143,7 +143,7 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
@@ -152,7 +152,7 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -161,7 +161,7 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
@@ -216,7 +216,7 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
@@ -225,7 +225,7 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -234,7 +234,7 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
@@ -243,7 +243,7 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -252,7 +252,7 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
@@ -289,7 +289,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -298,7 +298,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
@@ -308,7 +308,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -317,7 +317,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
@@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
@@ -400,7 +400,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -409,7 +409,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
@@ -419,7 +419,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -428,7 +428,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
@@ -438,7 +438,7 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -447,7 +447,7 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
@@ -457,7 +457,7 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -466,7 +466,7 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
@@ -476,7 +476,7 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -485,7 +485,7 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
@@ -495,7 +495,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -504,7 +504,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
@@ -514,7 +514,7 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -523,7 +523,7 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
@@ -533,7 +533,7 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -542,7 +542,7 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
@@ -552,7 +552,7 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -1227,7 +1227,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, size
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1236,7 +1236,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wx_i32mf2_ta(vint64m1_t op1, size_t shift, size_t vl) {
@@ -1245,7 +1245,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_ta(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1254,7 +1254,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wx_u32mf2_ta(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -1335,7 +1335,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vui
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1344,7 +1344,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnclip_wx_i32mf2_tama(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
@@ -1353,7 +1353,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_tama(vbool64_t mask, vint64m1_t op1, size_t sh
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1362,7 +1362,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wx_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {

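Every hunk in this patch makes the same one-word change: the first operand of each RVV intrinsic call, the passthru (merge) value that is a don't-care for these tail/mask-agnostic forms, goes from undef to poison. A minimal C++ sketch of the CodeGen-side idea, assuming the lowering materializes that operand explicitly (the helper name and surrounding code are illustrative, not the actual clang source):

    #include "llvm/IR/Constants.h" // llvm::PoisonValue, llvm::UndefValue
    #include "llvm/IR/Type.h"
    #include "llvm/IR/Value.h"

    // Hypothetical helper: build the don't-care passthru operand for an
    // RVV intrinsic call whose result type is the scalable vector VecTy.
    static llvm::Value *buildPassthru(llvm::Type *VecTy) {
      // Before this patch: return llvm::UndefValue::get(VecTy);
      // Poison is the stronger don't-care value: no use may observe it,
      // so the optimizer gets more freedom than with undef, and the
      // CHECK lines above now expect it verbatim.
      return llvm::PoisonValue::get(VecTy);
    }
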
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
index ed8193536ae04..97173e100340d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
@@ -564,7 +564,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t merge, vint64m1_t op1, size_t sh
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -573,7 +573,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t v
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wx_i32mf2_ta(vint64m1_t op1, size_t shift, size_t vl) {
@@ -618,7 +618,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint64m
 
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -627,7 +627,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf2
 
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnsra_wx_i32mf2_tama(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {

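Nothing changes at the C source level: callers keep writing the same intrinsic, and only the emitted IR operand differs. A sketch of the pattern the vnsra tests above exercise, assuming the non-overloaded, pre-__riscv_-prefix intrinsic naming these files use (the wrapper name is illustrative):

    #include <riscv_vector.h>
    #include <stddef.h>

    // Narrowing arithmetic right shift. With no merge argument the
    // result's tail is agnostic, so the intrinsic's passthru operand is a
    // don't-care and is now emitted as poison instead of undef.
    vint8mf8_t narrow_sra(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
      return vnsra_wv_i8mf8(op1, shift, vl);
    }
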
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
index bd7f5d5e1da30..52103553329c4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -564,7 +564,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, size_t
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -573,7 +573,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size_t
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wx_u32mf2_ta(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -618,7 +618,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -627,7 +627,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wx_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {

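(Illustrative sketch, not part of the patch: each hunk above rewrites the passthrough operand of an unmasked, tail-agnostic RVV intrinsic call from undef to poison. Assuming the pre-__riscv_-prefix intrinsic naming these tests use, C source of the following shape produces the checked IR; the wrapper name here is hypothetical.)

    #include <stddef.h>
    #include <riscv_vector.h>

    // Narrowing shift with no merge operand: the tail elements carry no
    // defined value, so Clang now materializes the passthrough argument of
    // the @llvm.riscv.vnsrl.* call as poison rather than undef.
    vuint8m4_t narrow_shift(vuint16m8_t op1, size_t shift, size_t vl) {
      return vnsrl_wx_u8m4(op1, shift, vl);
    }

(Since no destination register is merged in, poison is the more precise choice: it grants the optimizer the same freedom undef did without implying any particular bit pattern for the tail.)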
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
index ee7c81043d570..f944761ea0bd9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32
 
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2
 
 // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vor_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
index 13475d0dfc413..e77d91ae3dc48 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vect
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge,
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
index fec4eb676d5d4..2b6fbd208ef08 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vec
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge,
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
index 94d170c144868..40e291dc2be60 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vec
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge,
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
index c5713667b7865..8fa4a2c281fa0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vecto
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar,
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, v
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, v
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
index b2d3223a2969e..893cf0ad452f0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vect
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredsum_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge,
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
index ac7e7f6a0e2c9..89769f3e67689 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
@@ -948,7 +948,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vect
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -957,7 +957,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
@@ -984,7 +984,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge,
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
@@ -993,7 +993,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
index dcd3527572378..c5e7c7d24fa2b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vrem_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vrem_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vrem_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrem_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vrem_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vremu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
index dc6f68d0e01ef..290d0157d96cc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
@@ -18,7 +18,7 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
@@ -27,7 +27,7 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
@@ -37,7 +37,7 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
@@ -46,7 +46,7 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
@@ -56,7 +56,7 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
@@ -65,7 +65,7 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
@@ -74,7 +74,7 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
@@ -83,7 +83,7 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
@@ -92,7 +92,7 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
@@ -101,7 +101,7 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -110,7 +110,7 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
@@ -119,7 +119,7 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -128,7 +128,7 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
@@ -137,7 +137,7 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
@@ -147,7 +147,7 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
@@ -156,7 +156,7 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
@@ -166,7 +166,7 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
@@ -175,7 +175,7 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
@@ -185,7 +185,7 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
@@ -194,7 +194,7 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
@@ -204,7 +204,7 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
@@ -213,7 +213,7 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
@@ -223,7 +223,7 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
@@ -232,7 +232,7 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
@@ -242,7 +242,7 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
@@ -251,7 +251,7 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
@@ -261,7 +261,7 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
@@ -270,7 +270,7 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
@@ -280,7 +280,7 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
@@ -289,7 +289,7 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
@@ -299,7 +299,7 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
@@ -308,7 +308,7 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
@@ -318,7 +318,7 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
@@ -327,7 +327,7 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
@@ -337,7 +337,7 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
@@ -346,7 +346,7 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
@@ -356,7 +356,7 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
@@ -365,7 +365,7 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
@@ -375,7 +375,7 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
@@ -384,7 +384,7 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
@@ -394,7 +394,7 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
@@ -403,7 +403,7 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
@@ -413,7 +413,7 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
@@ -422,7 +422,7 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
@@ -432,7 +432,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
@@ -441,7 +441,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
@@ -451,7 +451,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
@@ -460,7 +460,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
@@ -470,7 +470,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
@@ -479,7 +479,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
@@ -488,7 +488,7 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
@@ -497,7 +497,7 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
@@ -506,7 +506,7 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
@@ -515,7 +515,7 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
@@ -524,7 +524,7 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
@@ -533,7 +533,7 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
@@ -542,7 +542,7 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
@@ -551,7 +551,7 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
@@ -561,7 +561,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
@@ -571,7 +571,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
@@ -581,7 +581,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
@@ -591,7 +591,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
@@ -601,7 +601,7 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
@@ -610,7 +610,7 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
@@ -620,7 +620,7 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
@@ -639,7 +639,7 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
@@ -648,7 +648,7 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
@@ -658,7 +658,7 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
@@ -667,7 +667,7 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
@@ -677,7 +677,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
@@ -687,7 +687,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
@@ -697,7 +697,7 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
@@ -706,7 +706,7 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
@@ -716,7 +716,7 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
@@ -725,7 +725,7 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
@@ -735,7 +735,7 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
@@ -744,7 +744,7 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
@@ -754,7 +754,7 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
@@ -763,7 +763,7 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
@@ -773,7 +773,7 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
@@ -782,7 +782,7 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
@@ -792,7 +792,7 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
@@ -801,7 +801,7 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
@@ -811,7 +811,7 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
@@ -820,7 +820,7 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
@@ -830,7 +830,7 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
@@ -839,7 +839,7 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
@@ -849,7 +849,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
@@ -859,7 +859,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
@@ -869,7 +869,7 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
@@ -878,7 +878,7 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
@@ -888,7 +888,7 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
@@ -897,7 +897,7 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
@@ -907,7 +907,7 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
@@ -916,7 +916,7 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
@@ -926,7 +926,7 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
@@ -935,7 +935,7 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
@@ -945,7 +945,7 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
@@ -954,7 +954,7 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
@@ -964,7 +964,7 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
@@ -973,7 +973,7 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
@@ -983,7 +983,7 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
@@ -992,7 +992,7 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
@@ -1002,7 +1002,7 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
@@ -1011,7 +1011,7 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
@@ -1021,7 +1021,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
@@ -1031,7 +1031,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
@@ -1041,7 +1041,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
@@ -1078,7 +1078,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
@@ -1088,7 +1088,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
@@ -1098,7 +1098,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
@@ -1108,7 +1108,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
@@ -1118,7 +1118,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
@@ -1128,7 +1128,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
@@ -1138,7 +1138,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
@@ -1148,7 +1148,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
@@ -1158,7 +1158,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
@@ -1168,7 +1168,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
@@ -1178,7 +1178,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
@@ -1188,7 +1188,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
@@ -1198,7 +1198,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
@@ -1208,7 +1208,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
@@ -1218,7 +1218,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
@@ -1228,7 +1228,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
@@ -1238,7 +1238,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
@@ -1248,7 +1248,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
@@ -1258,7 +1258,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
@@ -1268,7 +1268,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
@@ -1278,7 +1278,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -1288,7 +1288,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -1298,7 +1298,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
@@ -1308,7 +1308,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
@@ -1318,7 +1318,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
@@ -1328,7 +1328,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
@@ -1338,7 +1338,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -1348,7 +1348,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
@@ -1358,7 +1358,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
@@ -1368,7 +1368,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
@@ -1378,7 +1378,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
@@ -1388,7 +1388,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
@@ -1398,7 +1398,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
@@ -1408,7 +1408,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
@@ -1418,7 +1418,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
@@ -1428,7 +1428,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
@@ -1438,7 +1438,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
@@ -1448,7 +1448,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
@@ -1458,7 +1458,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
@@ -1468,7 +1468,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
@@ -1478,7 +1478,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
@@ -1488,7 +1488,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
@@ -1498,7 +1498,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
@@ -1508,7 +1508,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2,
@@ -3212,7 +3212,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
@@ -3221,7 +3221,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, si
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t vl) {
@@ -3230,7 +3230,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t v
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
@@ -3239,7 +3239,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, si
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t vl) {
@@ -3248,7 +3248,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t v
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
@@ -3257,7 +3257,7 @@ vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl) {
@@ -3266,7 +3266,7 @@ vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
@@ -3275,7 +3275,7 @@ vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t index, size_t
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl) {
@@ -3284,7 +3284,7 @@ vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
@@ -3293,7 +3293,7 @@ vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl) {
@@ -3302,7 +3302,7 @@ vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
@@ -3311,7 +3311,7 @@ vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl) {
@@ -3320,7 +3320,7 @@ vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3329,7 +3329,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -3338,7 +3338,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -3347,7 +3347,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -3356,7 +3356,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -3365,7 +3365,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t op2, size
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vrgatherei16_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -3617,7 +3617,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3626,7 +3626,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t index, size
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vx_i32mf2_ta(vint32mf2_t op1, size_t index, size_t vl) {
@@ -3635,7 +3635,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_ta(vint32mf2_t op1, size_t index, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3644,7 +3644,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t index, si
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vx_u32mf2_ta(vuint32mf2_t op1, size_t index, size_t vl) {
@@ -3653,7 +3653,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_ta(vuint32mf2_t op1, size_t index, size_t v
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vv_f32mf2_ta(vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3662,7 +3662,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_ta(vfloat32mf2_t op1, vuint32mf2_t index,
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vx_f32mf2_ta(vfloat32mf2_t op1, size_t index, size_t vl) {
@@ -3671,7 +3671,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_ta(vfloat32mf2_t op1, size_t index, size_t
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgatherei16_vv_i32mf2_ta(vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3680,7 +3680,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_ta(vint32mf2_t op1, vuint16mf4_t op2, si
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgatherei16_vv_u32mf2_ta(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3689,7 +3689,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_ta(vuint32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgatherei16_vv_f32mf2_ta(vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3860,7 +3860,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t mer
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3869,7 +3869,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgather_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t index, size_t vl) {
@@ -3878,7 +3878,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3887,7 +3887,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t index, size_t vl) {
@@ -3896,7 +3896,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size
 
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
@@ -3905,7 +3905,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vu
 
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t index, size_t vl) {
@@ -3914,7 +3914,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, si
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrgatherei16_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3923,7 +3923,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vu
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrgatherei16_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3932,7 +3932,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1,
 
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
index 69b32692edf78..728c788416b07 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrsub_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vrsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vrsub_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
index 83dbbc046b71a..a155486a1e56d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1741,7 +1741,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1750,7 +1750,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1759,7 +1759,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1768,7 +1768,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1849,7 +1849,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuin
 
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1858,7 +1858,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1876,7 +1876,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

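For context on this mechanical change: the first operand of these intrinsics is the passthru (merge) value, and for the tail-agnostic forms tested here nothing is merged in, so poison is the more precise choice. An undef operand must still resolve to some value of the type at each use, while poison places no constraint at all, giving the optimizer strictly more freedom. Below is a minimal standalone sketch of the resulting IR shape (the function name @sketch is hypothetical; the intrinsic signature is taken verbatim from the CHECK lines above), not part of this patch:

  ; Hypothetical standalone example, not from the patch.
  define <vscale x 2 x i32> @sketch(<vscale x 2 x i32> %op1,
                                    <vscale x 2 x i32> %op2, i64 %vl) {
    ; passthru is poison: tail elements are explicitly don't-care
    %r = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(
             <vscale x 2 x i32> poison, <vscale x 2 x i32> %op1,
             <vscale x 2 x i32> %op2, i64 %vl)
    ret <vscale x 2 x i32> %r
  }
  declare <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(
      <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64)
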
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c
index a674bac1ad5a1..d1d991b345106 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
@@ -16,7 +16,7 @@ vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin,
@@ -26,7 +26,7 @@ vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
@@ -36,7 +36,7 @@ vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin,
@@ -46,7 +46,7 @@ vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
@@ -56,7 +56,7 @@ vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin,
@@ -66,7 +66,7 @@ vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
@@ -76,7 +76,7 @@ vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
@@ -86,7 +86,7 @@ vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
@@ -96,7 +96,7 @@ vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
@@ -106,7 +106,7 @@ vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
@@ -116,7 +116,7 @@ vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
@@ -126,7 +126,7 @@ vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
@@ -136,7 +136,7 @@ vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
@@ -146,7 +146,7 @@ vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
@@ -156,7 +156,7 @@ vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
@@ -166,7 +166,7 @@ vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
@@ -176,7 +176,7 @@ vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
@@ -186,7 +186,7 @@ vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
@@ -196,7 +196,7 @@ vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin,
@@ -206,7 +206,7 @@ vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2,
@@ -216,7 +216,7 @@ vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
@@ -226,7 +226,7 @@ vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2,
@@ -236,7 +236,7 @@ vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
@@ -246,7 +246,7 @@ vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2,
@@ -256,7 +256,7 @@ vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
@@ -266,7 +266,7 @@ vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
@@ -276,7 +276,7 @@ vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
@@ -286,7 +286,7 @@ vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
@@ -296,7 +296,7 @@ vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin,
@@ -306,7 +306,7 @@ vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
@@ -316,7 +316,7 @@ vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin,
@@ -326,7 +326,7 @@ vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2,
@@ -336,7 +336,7 @@ vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
@@ -346,7 +346,7 @@ vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2,
@@ -356,7 +356,7 @@ vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
@@ -366,7 +366,7 @@ vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
@@ -376,7 +376,7 @@ vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin,
@@ -386,7 +386,7 @@ vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
@@ -396,7 +396,7 @@ vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin,
@@ -406,7 +406,7 @@ vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
@@ -416,7 +416,7 @@ vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin,
@@ -426,7 +426,7 @@ vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2,
@@ -436,7 +436,7 @@ vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
@@ -446,7 +446,7 @@ vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -456,7 +456,7 @@ vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2,
@@ -466,7 +466,7 @@ vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -476,7 +476,7 @@ vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2,
@@ -486,7 +486,7 @@ vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
@@ -496,7 +496,7 @@ vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2,
@@ -506,7 +506,7 @@ vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin,
@@ -516,7 +516,7 @@ vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
@@ -526,7 +526,7 @@ vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin,
@@ -536,7 +536,7 @@ vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
@@ -546,7 +546,7 @@ vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin,
@@ -556,7 +556,7 @@ vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
@@ -566,7 +566,7 @@ vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin,
@@ -576,7 +576,7 @@ vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
@@ -586,7 +586,7 @@ vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -596,7 +596,7 @@ vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
@@ -606,7 +606,7 @@ vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -616,7 +616,7 @@ vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
@@ -626,7 +626,7 @@ vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
@@ -636,7 +636,7 @@ vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
@@ -646,7 +646,7 @@ vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
@@ -656,7 +656,7 @@ vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2,
@@ -666,7 +666,7 @@ vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
@@ -676,7 +676,7 @@ vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2,
@@ -686,7 +686,7 @@ vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
@@ -696,7 +696,7 @@ vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2,
@@ -706,7 +706,7 @@ vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -716,7 +716,7 @@ vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
@@ -726,7 +726,7 @@ vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
@@ -736,7 +736,7 @@ vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
@@ -746,7 +746,7 @@ vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
@@ -756,7 +756,7 @@ vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
@@ -766,7 +766,7 @@ vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
@@ -776,7 +776,7 @@ vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2,
@@ -786,7 +786,7 @@ vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
@@ -796,7 +796,7 @@ vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2,
@@ -806,7 +806,7 @@ vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
@@ -816,7 +816,7 @@ vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
@@ -826,7 +826,7 @@ vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
@@ -836,7 +836,7 @@ vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
@@ -846,7 +846,7 @@ vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
@@ -856,7 +856,7 @@ vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
@@ -866,7 +866,7 @@ vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
@@ -876,7 +876,7 @@ vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2,
@@ -922,7 +922,7 @@ vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint3
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsbc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) {
@@ -931,7 +931,7 @@ vint32mf2_t test_vsbc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsbc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) {
@@ -940,7 +940,7 @@ vint32mf2_t test_vsbc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t borr
 
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsbc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) {
@@ -949,7 +949,7 @@ vuint32mf2_t test_vsbc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64
 
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsbc_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
index 8d6919285c5d3..206e3537e0722 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
@@ -33,7 +33,7 @@ vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
@@ -42,7 +42,7 @@ vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
@@ -51,7 +51,7 @@ vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
@@ -69,7 +69,7 @@ vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
@@ -78,7 +78,7 @@ vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
@@ -87,7 +87,7 @@ vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
@@ -96,7 +96,7 @@ vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
@@ -105,7 +105,7 @@ vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
@@ -114,7 +114,7 @@ vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
@@ -123,7 +123,7 @@ vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
@@ -132,7 +132,7 @@ vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
@@ -141,7 +141,7 @@ vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
@@ -159,7 +159,7 @@ vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
@@ -168,7 +168,7 @@ vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
@@ -177,7 +177,7 @@ vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
@@ -186,7 +186,7 @@ vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
@@ -195,7 +195,7 @@ vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
@@ -204,7 +204,7 @@ vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
@@ -213,7 +213,7 @@ vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
@@ -222,7 +222,7 @@ vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
@@ -231,7 +231,7 @@ vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
@@ -249,7 +249,7 @@ vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
@@ -547,7 +547,7 @@ vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf2_i64m1_ta(vint32mf2_t op1, size_t vl) {
@@ -574,7 +574,7 @@ vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf2_i64m1_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
index 29c146dfec32c..d9f37921855e1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
@@ -79,7 +79,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
@@ -89,7 +89,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
@@ -116,7 +116,7 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
@@ -125,7 +125,7 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
@@ -135,7 +135,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
@@ -144,7 +144,7 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
@@ -153,7 +153,7 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
@@ -162,7 +162,7 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
@@ -171,7 +171,7 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
@@ -180,7 +180,7 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
@@ -207,7 +207,7 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
@@ -217,7 +217,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
@@ -227,7 +227,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
@@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
@@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
@@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
@@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
@@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
@@ -283,7 +283,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
@@ -293,7 +293,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
@@ -303,7 +303,7 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
@@ -313,7 +313,7 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
@@ -323,7 +323,7 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
@@ -333,7 +333,7 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
@@ -343,7 +343,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
@@ -353,7 +353,7 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
@@ -363,7 +363,7 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
@@ -373,7 +373,7 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
@@ -383,7 +383,7 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
@@ -393,7 +393,7 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
@@ -403,7 +403,7 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
@@ -413,7 +413,7 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
@@ -924,7 +924,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1down_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t vl) {
@@ -933,7 +933,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1down_vx_u32mf2_ta(vuint32mf2_t src, uint32_t value, size_t vl) {
@@ -978,7 +978,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge,
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1down_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) {
@@ -987,7 +987,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int
 
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1down_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
index 76df4a1642a63..eaa90dccd13cf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
@@ -42,7 +42,7 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
@@ -51,7 +51,7 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
@@ -60,7 +60,7 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
@@ -69,7 +69,7 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
@@ -79,7 +79,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
@@ -89,7 +89,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
@@ -98,7 +98,7 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
@@ -107,7 +107,7 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
@@ -116,7 +116,7 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
@@ -125,7 +125,7 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
@@ -135,7 +135,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
@@ -144,7 +144,7 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
@@ -153,7 +153,7 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
@@ -162,7 +162,7 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
@@ -171,7 +171,7 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
@@ -180,7 +180,7 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
@@ -189,7 +189,7 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
@@ -198,7 +198,7 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
@@ -207,7 +207,7 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
@@ -216,7 +216,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
@@ -225,7 +225,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
@@ -234,7 +234,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
@@ -243,7 +243,7 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
@@ -252,7 +252,7 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
@@ -261,7 +261,7 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
@@ -270,7 +270,7 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
@@ -280,7 +280,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
@@ -290,7 +290,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
@@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
@@ -310,7 +310,7 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
@@ -320,7 +320,7 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
@@ -330,7 +330,7 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
@@ -340,7 +340,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
@@ -350,7 +350,7 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
@@ -360,7 +360,7 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
@@ -370,7 +370,7 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
@@ -380,7 +380,7 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
@@ -390,7 +390,7 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
@@ -400,7 +400,7 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
@@ -410,7 +410,7 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value,
@@ -899,7 +899,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, u
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1up_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t vl) {
@@ -908,7 +908,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t v
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1up_vx_u32mf2_ta(vuint32mf2_t src, uint32_t value, size_t vl) {
@@ -953,7 +953,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, v
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1up_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) {
@@ -962,7 +962,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int32
 
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1up_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) {

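(Reading aid, not an additional change: every hunk in this patch follows one mechanical pattern. The dead passthrough, i.e. merge, operand of an unmasked, tail-agnostic, or tail/mask-agnostic RVV intrinsic call changes from undef to poison. As an illustrative side-by-side, with the operand names copied from the vsll hunks that follow, the expected IR goes from

  %0 = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> %op1, i64 %shift, i64 %vl)

to

  %0 = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i64 %shift, i64 %vl)

Poison expresses the same "don't care" intent while giving the optimizer stronger guarantees: each use of undef may independently be refined to a different value, whereas poison never needs to be materialized at all.)
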
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
index 0297c73f43e6a..1d1a1b6500efd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vsll_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t v
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vsll_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint3
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vsll_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsll_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vsll_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shi
 
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsll_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {

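For context, the CHECK lines above are autogenerated and the diff context elides the test bodies; a minimal sketch of one such unmasked vsll test, assuming the unprefixed intrinsic naming these files used at the time:

    #include <riscv_vector.h>

    // Unmasked shift: with no mask and no merge value, the destination
    // operand of the underlying @llvm.riscv.vsll intrinsic carries no
    // defined element values, which is why the tests now expect `poison`
    // rather than `undef` there.
    vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
      return vsll_vx_u32m2(op1, shift, vl);
    }
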
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
index 80c0dc08f06cd..bb7fdd2f3204b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -26,7 +26,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -44,7 +44,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -62,7 +62,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {

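The 64-bit-element vsmul cases above sit in their own file, presumably because vsmul at EEW=64 is only guaranteed by the full V extension and not by the Zve64* subsets; the test shape is otherwise identical, e.g. (a sketch under the same naming assumption as above):

    #include <riscv_vector.h>

    // EEW=64 fixed-point saturating multiply; the passthrough operand of
    // @llvm.riscv.vsmul is likewise poison in the unmasked form.
    vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
      return vsmul_vv_i64m1(op1, op2, vl);
    }
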
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
index 936c2368d9d59..d7473b4eae436 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32m
 
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsmul_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {

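Beyond the unmasked forms, the _tama checks above exercise the masked intrinsic with a trailing policy immediate: the `i64 3` in the IR encodes bit 0 = tail agnostic and bit 1 = mask agnostic, so no merge value survives and poison is equally valid for the passthrough operand. A sketch of the corresponding source, assuming the policy-suffixed naming of the time:

    #include <riscv_vector.h>

    // Tail-agnostic, mask-agnostic masked multiply: with both policy bits
    // agnostic (immediate 3), the passthrough operand of
    // @llvm.riscv.vsmul.mask never contributes defined element values.
    vint32mf2_t test_vsmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1,
                                          vint32mf2_t op2, size_t vl) {
      return vsmul_vv_i32mf2_tama(mask, op1, op2, vl);
    }
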
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
index b2b750c166034..931d44619047c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
@@ -816,7 +816,7 @@ vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t sh
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -825,7 +825,7 @@ vint32mf2_t test_vsra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t v
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -870,7 +870,7 @@ vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf
 
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -879,7 +879,7 @@ vint32mf2_t test_vsra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2
 
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsra_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {

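For context, the unmasked non-policy vsra overloads take no merge operand, so the passthrough operand of the lowered intrinsic carries no defined value; modeling it as poison rather than undef hands the optimizer the stronger of the two "no value" semantics. A minimal usage sketch in C (illustrative only, written against the unprefixed intrinsic naming these tests use; not part of the diff):

// Sketch: tail-agnostic arithmetic right shift. No merge operand is
// passed at the C level, so the IR-level passthrough is poison after
// this change.
#include <riscv_vector.h>

vint32m1_t shift_right_arith(vint32m1_t op1, size_t shift, size_t vl) {
  // Lowers to @llvm.riscv.vsra.* with a poison passthrough operand.
  return vsra_vx_i32m1(op1, shift, vl);
}
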
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
index 3e7b3dd0c37ff..2a2d4bdab2d33 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -141,7 +141,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -150,7 +150,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -159,7 +159,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -168,7 +168,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -177,7 +177,7 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -186,7 +186,7 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -195,7 +195,7 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -204,7 +204,7 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -213,7 +213,7 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -222,7 +222,7 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -231,7 +231,7 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -240,7 +240,7 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -249,7 +249,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -258,7 +258,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -267,7 +267,7 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -276,7 +276,7 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -285,7 +285,7 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -294,7 +294,7 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -303,7 +303,7 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -312,7 +312,7 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -321,7 +321,7 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -330,7 +330,7 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -339,7 +339,7 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -348,7 +348,7 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -366,7 +366,7 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -375,7 +375,7 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -384,7 +384,7 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -393,7 +393,7 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -816,7 +816,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -825,7 +825,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -870,7 +870,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint3
 
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -879,7 +879,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsrl_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
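
(For reference, since the hunks above truncate the test bodies: a minimal sketch of what one of these unmasked vsrl tests plausibly compiles. The intrinsic spelling vsrl_vx_u32m1 is an assumption inferred from the test names, not text from the patch. With no merge operand, the tail-agnostic call now lowers to an IR call whose passthrough operand is poison rather than undef.)

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical reconstruction of one unmasked test body; the intrinsic
// name vsrl_vx_u32m1 is inferred from the test names in the hunks above.
vuint32m1_t sketch_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
  // Expected lowering after this patch (u32m1 is <vscale x 2 x i32>):
  //   %0 = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(
  //            <vscale x 2 x i32> poison, <vscale x 2 x i32> %op1,
  //            i64 %shift, i64 %vl)
  return vsrl_vx_u32m1(op1, shift, vl);
}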

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
index b15aa7d8baf2f..2d4351fe842e8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
@@ -866,7 +866,7 @@ vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t s
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -875,7 +875,7 @@ vint32mf2_t test_vssra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -920,7 +920,7 @@ vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32m
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -929,7 +929,7 @@ vint32mf2_t test_vssra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssra_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
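
(The _tama hunks above follow the same pattern through the masked intrinsics: only the merge operand of @llvm.riscv.vssra.mask.* changes to poison, while the trailing policy operand i64 3 — tail agnostic, mask agnostic — is unchanged. A minimal sketch, again assuming the policy-suffixed spelling inferred from the test names.)

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical reconstruction of a masked TAMA test body; the intrinsic
// name vssra_vx_i32mf2_tama is inferred from the tests above.
vint32mf2_t sketch_vssra_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1,
                                        size_t shift, size_t vl) {
  // Expected lowering: merge operand is poison, policy operand i64 3 kept:
  //   call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(
  //       <vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i64 %shift,
  //       <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  return vssra_vx_i32mf2_tama(mask, op1, shift, vl);
}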

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
index 9c0f899e24eae..284a3be2e0e70 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -15,7 +15,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -24,7 +24,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -33,7 +33,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -42,7 +42,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -51,7 +51,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -60,7 +60,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -69,7 +69,7 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -78,7 +78,7 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -87,7 +87,7 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -96,7 +96,7 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -105,7 +105,7 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -114,7 +114,7 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -123,7 +123,7 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -132,7 +132,7 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
@@ -142,7 +142,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -151,7 +151,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
@@ -161,7 +161,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -170,7 +170,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -179,7 +179,7 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -188,7 +188,7 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -197,7 +197,7 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -206,7 +206,7 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -215,7 +215,7 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -224,7 +224,7 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -233,7 +233,7 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -242,7 +242,7 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
@@ -252,7 +252,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -261,7 +261,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -270,7 +270,7 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -279,7 +279,7 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -288,7 +288,7 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -297,7 +297,7 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -306,7 +306,7 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -315,7 +315,7 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -324,7 +324,7 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -333,7 +333,7 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -342,7 +342,7 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -351,7 +351,7 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -360,7 +360,7 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -369,7 +369,7 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -378,7 +378,7 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -396,7 +396,7 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -881,7 +881,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -890,7 +890,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -935,7 +935,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint
 
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -944,7 +944,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssrl_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {

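For reference, a minimal sketch of the kind of source the checks in the
next file cover. It is hypothetical (the intrinsic name assumes the
pre-__riscv_ naming these tests were written against), but it mirrors the
test signatures below: an unmasked, tail-agnostic call has no meaningful
passthru value, so Clang now materializes that operand as poison instead
of undef.

// sketch.c (hypothetical example, not part of the commit)
#include <riscv_vector.h>
#include <stddef.h>

vint8m1_t demo_vssub(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  // Lowers to @llvm.riscv.vssub.nxv8i8.nxv8i8.i64; after this change the
  // passthru operand is emitted as <vscale x 8 x i8> poison rather than
  // undef. op1, op2, and vl correspond to [[OP1]], [[OP2]], and [[VL]]
  // in the CHECK lines below.
  return vssub_vv_i8m1(op1, op2, vl);
}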
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
index c04ca5980b590..02c59c182036c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -538,7 +538,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -547,7 +547,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -557,7 +557,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -566,7 +566,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -575,7 +575,7 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -593,7 +593,7 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -602,7 +602,7 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -611,7 +611,7 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -629,7 +629,7 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -638,7 +638,7 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -648,7 +648,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -666,7 +666,7 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -675,7 +675,7 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -684,7 +684,7 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -693,7 +693,7 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -702,7 +702,7 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -711,7 +711,7 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -729,7 +729,7 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -738,7 +738,7 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -756,7 +756,7 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1741,7 +1741,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1750,7 +1750,7 @@ vint32mf2_t test_vssub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1759,7 +1759,7 @@ vint32mf2_t test_vssub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1768,7 +1768,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1849,7 +1849,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuin
 
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1858,7 +1858,7 @@ vint32mf2_t test_vssub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2
 
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vssub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vssub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t o
 
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1876,7 +1876,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint3
 
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
index 50fa379627243..33581803335b0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vsub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint3
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vsub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsub_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
index f08ce542f015a..674b5890f828b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -556,7 +556,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
@@ -575,7 +575,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -594,7 +594,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -603,7 +603,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
@@ -613,7 +613,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
@@ -622,7 +622,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -631,7 +631,7 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -640,7 +640,7 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
@@ -649,7 +649,7 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
@@ -658,7 +658,7 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -667,7 +667,7 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -676,7 +676,7 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
@@ -685,7 +685,7 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
@@ -694,7 +694,7 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -703,7 +703,7 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -712,7 +712,7 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
@@ -721,7 +721,7 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
@@ -730,7 +730,7 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -739,7 +739,7 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -748,7 +748,7 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
@@ -757,7 +757,7 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
@@ -766,7 +766,7 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -776,7 +776,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -785,7 +785,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -814,7 +814,7 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -823,7 +823,7 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
@@ -832,7 +832,7 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
@@ -841,7 +841,7 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -850,7 +850,7 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -859,7 +859,7 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
@@ -868,7 +868,7 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
@@ -877,7 +877,7 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -886,7 +886,7 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -895,7 +895,7 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
@@ -904,7 +904,7 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
@@ -913,7 +913,7 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -922,7 +922,7 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -931,7 +931,7 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
@@ -940,7 +940,7 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
@@ -949,7 +949,7 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -959,7 +959,7 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -968,7 +968,7 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -986,7 +986,7 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -995,7 +995,7 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1004,7 +1004,7 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
@@ -1013,7 +1013,7 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
@@ -1022,7 +1022,7 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1031,7 +1031,7 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
@@ -1049,7 +1049,7 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
@@ -1058,7 +1058,7 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1076,7 +1076,7 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,7 +1085,7 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
@@ -2394,7 +2394,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, uint32_t
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -2403,7 +2403,7 @@ vint64m1_t test_vwadd_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -2412,7 +2412,7 @@ vint64m1_t test_vwadd_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wv_i64m1_ta(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -2421,7 +2421,7 @@ vint64m1_t test_vwadd_wv_i64m1_ta(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wx_i64m1_ta(vint64m1_t op1, int32_t op2, size_t vl) {
@@ -2430,7 +2430,7 @@ vint64m1_t test_vwadd_wx_i64m1_ta(vint64m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2439,7 +2439,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -2448,7 +2448,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wv_u64m1_ta(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2457,7 +2457,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_ta(vuint64m1_t op1, vuint32mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wx_u64m1_ta(vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -2610,7 +2610,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64
 
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -2619,7 +2619,7 @@ vint64m1_t test_vwadd_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -2628,7 +2628,7 @@ vint64m1_t test_vwadd_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2
 
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -2637,7 +2637,7 @@ vint64m1_t test_vwadd_wv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwadd_wx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) {
@@ -2646,7 +2646,7 @@ vint64m1_t test_vwadd_wx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int32_t op2,
 
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2655,7 +2655,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -2664,7 +2664,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t
 
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2673,7 +2673,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wx_u64m1_tama(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) {

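A minimal standalone sketch (not part of this commit) of what the updated CHECK
lines assert: for the unmasked, tail-agnostic (_ta), and tail/mask-agnostic
(_tama) overloads the first intrinsic operand is a don't-care passthrough, so
the frontend now materializes it as poison instead of undef. Unlike undef,
which must still evaluate to some value of its type at each use, poison simply
propagates and places no constraint on the optimizer. The intrinsic signature
below is copied from the test_vwadd_vv_i64m1_ta CHECK line above; the wrapper
function name is hypothetical.

; Passthrough is poison: tail elements are agnostic, so any value is legal
; there, and poison gives later passes the most freedom.
declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(
  <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64)

define <vscale x 1 x i64> @sketch_vwadd_vv_i64m1_ta(<vscale x 1 x i32> %op1,
                                                    <vscale x 1 x i32> %op2,
                                                    i64 %vl) {
entry:
  %0 = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(
         <vscale x 1 x i64> poison,   ; was: undef before this change
         <vscale x 1 x i32> %op1, <vscale x 1 x i32> %op2, i64 %vl)
  ret <vscale x 1 x i64> %0
}

Note that the _tu overloads (e.g. test_vwaddu_wx_u64m1_tu above) are untouched:
their merge argument carries real tail data and still reaches the intrinsic.
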
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
index e348de3750d1b..6d0614fc6f85d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -798,7 +798,7 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1680,7 +1680,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, uint32_t
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1689,7 +1689,7 @@ vint64m1_t test_vwmul_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1698,7 +1698,7 @@ vint64m1_t test_vwmul_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1716,7 +1716,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vv_i64m1_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1725,7 +1725,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t vl
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vx_i64m1_ta(vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1842,7 +1842,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1851,7 +1851,7 @@ vint64m1_t test_vwmul_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmul_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1860,7 +1860,7 @@ vint64m1_t test_vwmul_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2
 
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1869,7 +1869,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1878,7 +1878,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1887,7 +1887,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
index 096483b475fd2..20b8e694a596c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
@@ -780,7 +780,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t ve
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
@@ -789,7 +789,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scala
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
@@ -816,7 +816,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t merge
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
@@ -825,7 +825,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector,
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
index 0855f4b74d277..e2a686ef9e4e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -556,7 +556,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -565,7 +565,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
@@ -575,7 +575,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
@@ -584,7 +584,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -594,7 +594,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -603,7 +603,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
@@ -613,7 +613,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
@@ -622,7 +622,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -631,7 +631,7 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -640,7 +640,7 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
@@ -649,7 +649,7 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
@@ -658,7 +658,7 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -667,7 +667,7 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -676,7 +676,7 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
@@ -685,7 +685,7 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
@@ -694,7 +694,7 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -703,7 +703,7 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -712,7 +712,7 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
@@ -721,7 +721,7 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
@@ -730,7 +730,7 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -739,7 +739,7 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -748,7 +748,7 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
@@ -757,7 +757,7 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
@@ -766,7 +766,7 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -776,7 +776,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -785,7 +785,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -814,7 +814,7 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -823,7 +823,7 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
@@ -832,7 +832,7 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
@@ -841,7 +841,7 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -850,7 +850,7 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -859,7 +859,7 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
@@ -868,7 +868,7 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
@@ -877,7 +877,7 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -886,7 +886,7 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -895,7 +895,7 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
@@ -904,7 +904,7 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
@@ -913,7 +913,7 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -922,7 +922,7 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -931,7 +931,7 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
@@ -940,7 +940,7 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
@@ -949,7 +949,7 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -959,7 +959,7 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -968,7 +968,7 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -986,7 +986,7 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -995,7 +995,7 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1004,7 +1004,7 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
@@ -1013,7 +1013,7 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
@@ -1022,7 +1022,7 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1031,7 +1031,7 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
@@ -1049,7 +1049,7 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
@@ -1058,7 +1058,7 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1076,7 +1076,7 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,7 +1085,7 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
@@ -2394,7 +2394,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, uint32_
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vv_i64m1_ta (vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -2403,7 +2403,7 @@ vint64m1_t test_vwsub_vv_i64m1_ta (vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vx_i64m1_ta (vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -2412,7 +2412,7 @@ vint64m1_t test_vwsub_vx_i64m1_ta (vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wv_i64m1_ta (vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -2421,7 +2421,7 @@ vint64m1_t test_vwsub_wv_i64m1_ta (vint64m1_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wx_i64m1_ta (vint64m1_t op1, int32_t op2, size_t vl) {
@@ -2430,7 +2430,7 @@ vint64m1_t test_vwsub_wx_i64m1_ta (vint64m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vv_u64m1_ta (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2439,7 +2439,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_ta (vuint32mf2_t op1, vuint32mf2_t op2, size_t
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vx_u64m1_ta (vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -2448,7 +2448,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_ta (vuint32mf2_t op1, uint32_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wv_u64m1_ta (vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2457,7 +2457,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_ta (vuint64m1_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wx_u64m1_ta (vuint64m1_t op1, uint32_t op2, size_t vl) {
@@ -2610,7 +2610,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_tumu (vbool64_t mask, vuint64m1_t merge, vuint6
 
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vv_i64m1_tama (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -2619,7 +2619,7 @@ vint64m1_t test_vwsub_vv_i64m1_tama (vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_vx_i64m1_tama (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -2628,7 +2628,7 @@ vint64m1_t test_vwsub_vx_i64m1_tama (vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wv_i64m1_tama (vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
@@ -2637,7 +2637,7 @@ vint64m1_t test_vwsub_wv_i64m1_tama (vbool64_t mask, vint64m1_t op1, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwsub_wx_i64m1_tama (vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) {
@@ -2646,7 +2646,7 @@ vint64m1_t test_vwsub_wx_i64m1_tama (vbool64_t mask, vint64m1_t op1, int32_t op2
 
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vv_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2655,7 +2655,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, vuint32
 
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_vx_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -2664,7 +2664,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, uint32_
 
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wv_u64m1_tama (vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2673,7 +2673,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_tama (vbool64_t mask, vuint64m1_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwsubu_wx_u64m1_tama (vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
index f4d8e4333e789..de4e8c6f5861e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -87,7 +87,7 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -96,7 +96,7 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -105,7 +105,7 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -114,7 +114,7 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -123,7 +123,7 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -132,7 +132,7 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -141,7 +141,7 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -150,7 +150,7 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -159,7 +159,7 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -168,7 +168,7 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -177,7 +177,7 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -186,7 +186,7 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -195,7 +195,7 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -204,7 +204,7 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -213,7 +213,7 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -222,7 +222,7 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -231,7 +231,7 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -249,7 +249,7 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -267,7 +267,7 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -276,7 +276,7 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -285,7 +285,7 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -294,7 +294,7 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -303,7 +303,7 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -312,7 +312,7 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -321,7 +321,7 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -330,7 +330,7 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -339,7 +339,7 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -348,7 +348,7 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -357,7 +357,7 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -366,7 +366,7 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -375,7 +375,7 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -384,7 +384,7 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -393,7 +393,7 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -402,7 +402,7 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -411,7 +411,7 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -429,7 +429,7 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -438,7 +438,7 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -447,7 +447,7 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -456,7 +456,7 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -465,7 +465,7 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -474,7 +474,7 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -483,7 +483,7 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -492,7 +492,7 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -501,7 +501,7 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -510,7 +510,7 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -519,7 +519,7 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -528,7 +528,7 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -537,7 +537,7 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -546,7 +546,7 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -555,7 +555,7 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -564,7 +564,7 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -573,7 +573,7 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -582,7 +582,7 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -591,7 +591,7 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -609,7 +609,7 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -618,7 +618,7 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -636,7 +636,7 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -645,7 +645,7 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -654,7 +654,7 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -663,7 +663,7 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -672,7 +672,7 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -681,7 +681,7 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -690,7 +690,7 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -699,7 +699,7 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -708,7 +708,7 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -726,7 +726,7 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -735,7 +735,7 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1626,7 +1626,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1635,7 +1635,7 @@ vint32mf2_t test_vxor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1644,7 +1644,7 @@ vint32mf2_t test_vxor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1653,7 +1653,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1734,7 +1734,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint3
 
 // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1743,7 +1743,7 @@ vint32mf2_t test_vxor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_
 
 // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vxor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1752,7 +1752,7 @@ vint32mf2_t test_vxor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op
 
 // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1761,7 +1761,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32m
 
 // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vxor_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {

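The hunks above all make one mechanical change: the leading operand of each
llvm.riscv.vxor.* call is the passthru (merge) vector, and for the
tail-agnostic forms it becomes poison instead of undef. As an illustration of
the resulting call shape (a sketch only, not the actual CGBuiltin lowering;
the helper and its names are invented for this example):

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"

// Build an unmasked, tail-agnostic vxor.vv whose passthru operand is poison,
// matching the updated CHECK lines.
llvm::Value *buildVxorVV(llvm::IRBuilder<> &Builder, llvm::Type *VecTy,
                         llvm::Value *Op1, llvm::Value *Op2, llvm::Value *VL) {
  llvm::Value *Passthru = llvm::PoisonValue::get(VecTy); // tail is don't-care
  return Builder.CreateIntrinsic(
      llvm::Intrinsic::riscv_vxor,
      /*Types=*/{VecTy, VecTy, VL->getType()}, // result, op2, XLEN overloads
      /*Args=*/{Passthru, Op1, Op2, VL});
}
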
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
index 47c09009d4bbf..eefea61e31ec5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
@@ -15,7 +15,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
@@ -24,7 +24,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
@@ -33,7 +33,7 @@ vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
@@ -42,7 +42,7 @@ vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
@@ -51,7 +51,7 @@ vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> poison, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
@@ -69,7 +69,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
@@ -78,7 +78,7 @@ vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
@@ -87,7 +87,7 @@ vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
@@ -96,7 +96,7 @@ vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
@@ -105,7 +105,7 @@ vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
@@ -114,7 +114,7 @@ vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
@@ -123,7 +123,7 @@ vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
@@ -132,7 +132,7 @@ vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
@@ -141,7 +141,7 @@ vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
@@ -159,7 +159,7 @@ vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
@@ -168,7 +168,7 @@ vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
@@ -177,7 +177,7 @@ vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> poison, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
@@ -186,7 +186,7 @@ vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
@@ -195,7 +195,7 @@ vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
@@ -204,7 +204,7 @@ vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
@@ -213,7 +213,7 @@ vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
@@ -222,7 +222,7 @@ vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
@@ -231,7 +231,7 @@ vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> poison, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> poison, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
@@ -249,7 +249,7 @@ vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> poison, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
@@ -547,7 +547,7 @@ vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, size_t
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf2_u64m1_ta(vuint32mf2_t op1, size_t vl) {
@@ -574,7 +574,7 @@ vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32
 
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf2_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) {

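One detail worth noting in the masked _tama checks above: the trailing
"i64 3" is the vta/vma policy operand. A sketch of the bit encoding (these
values mirror LLVM's RISC-V policy flags and are reproduced here only as an
illustration):

// Policy operand bits for masked RVV intrinsics.
enum RVVPolicy : unsigned {
  TAIL_AGNOSTIC = 1, // tail elements may hold any value
  MASK_AGNOSTIC = 2, // masked-off elements may hold any value
};
// TAMA == TAIL_AGNOSTIC | MASK_AGNOSTIC == 3, matching the i64 3 operand,
// which is also why the passthru for these calls can be poison.
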
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index 560a6a7d66937..b69712e57f04f 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -197,11 +197,13 @@ void emitCodeGenSwitchBody(const RVVIntrinsic *RVVI, raw_ostream &OS) {
         OS << "  Ops.push_back(ConstantInt::get(Ops.back()->getType(),"
               " PolicyAttrs));\n";
       if (RVVI->hasMaskedOffOperand() && RVVI->getPolicyAttrs().isTAMAPolicy())
-        OS << "  Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));\n";
+        OS << "  Ops.insert(Ops.begin(), "
+              "llvm::PoisonValue::get(ResultType));\n";
       // Masked reduction cases.
       if (!RVVI->hasMaskedOffOperand() && RVVI->hasPassthruOperand() &&
           RVVI->getPolicyAttrs().isTAMAPolicy())
-        OS << "  Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));\n";
+        OS << "  Ops.insert(Ops.begin(), "
+              "llvm::PoisonValue::get(ResultType));\n";
     } else {
       OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
     }
@@ -210,7 +212,7 @@ void emitCodeGenSwitchBody(const RVVIntrinsic *RVVI, raw_ostream &OS) {
       OS << "  Ops.push_back(ConstantInt::get(Ops.back()->getType(), "
             "PolicyAttrs));\n";
     else if (RVVI->hasPassthruOperand() && RVVI->getPolicyAttrs().isTAPolicy())
-      OS << "  Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));\n";
+      OS << "  Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));\n";
   }
 
   OS << "  IntrinsicTypes = {";

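The RISCVVEmitter.cpp change above is the root of all the test updates: the
generated CodeGen switch now materializes the don't-care passthru with
llvm::PoisonValue::get rather than llvm::UndefValue::get. Poison is the
stronger "no value" constant: undef may take a different arbitrary value at
each use, while poison propagates through dependent operations, giving the
optimizer strictly more freedom. A minimal standalone sketch of the emitted
pattern (the Ops/ResultType names follow the generated snippet; the helper
itself is invented for illustration):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"

// Prepend a don't-care passthru operand, as the generated builtin-lowering
// code does for tail-agnostic policies.
static void addPassthruPlaceholder(llvm::Type *ResultType,
                                   llvm::SmallVectorImpl<llvm::Value *> &Ops) {
  Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
}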